From: Chenguangli <chenguangli2@huawei.com>
driver inclusion
category: feature
bugzilla: NA
-----------------------------------------------------------------------
This module includes cfg, cqm, hwdev, hwif, mgmt, and sml. It is mainly used to initialize the chip capabilities and to initialize the resources used for communication between the driver and the chip.
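The initialization entry point added by this patch is hifc_init_hwdev(), driven by a struct hifc_init_para that carries the adapter/PCI handles and the BAR mappings, with teardown done by hifc_free_hwdev(). The sketch below only illustrates the intended call sequence from a caller's point of view; the caller itself (the example_* symbols) is hypothetical and not part of this patch, and the parameter types are assumptions inferred from init_hwdev_and_hwif() in hifc_cfg.c.

	#include <linux/pci.h>
	#include "hifc_hw.h"
	#include "hifc_hwdev.h"

	/* Hypothetical caller, for illustration only: none of the example_*
	 * symbols exist in this patch. Field names follow the uses in
	 * init_hwdev_and_hwif(); their exact types are assumptions.
	 */
	static struct hifc_hwdev *example_hwdev;

	static int example_attach(struct pci_dev *pdev,
				  void __iomem *cfg_bar, void __iomem *intr_bar,
				  u64 db_base_phy, void __iomem *db_bar,
				  void __iomem *dwqe_mapping)
	{
		struct hifc_init_para para = {0};

		para.adapter_hdl = pdev;	/* opaque adapter handle */
		para.pcidev_hdl = pdev;
		para.dev_hdl = &pdev->dev;	/* used by sdk_err()/sdk_info() */
		para.chip_node = NULL;		/* chip node not modelled here */

		/* BAR mappings forwarded to hifc_init_hwif() */
		para.cfg_reg_base = cfg_bar;
		para.intr_reg_base = intr_bar;
		para.db_base_phy = db_base_phy;
		para.db_base = db_bar;
		para.dwqe_mapping = dwqe_mapping;

		para.hwdev = &example_hwdev;

		/* 0: success, >0: communication channel partially up, <0: failure */
		return hifc_init_hwdev(&para);
	}

	static void example_detach(void)
	{
		hifc_free_hwdev(example_hwdev);
		example_hwdev = NULL;
	}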
Signed-off-by: Chenguangli chenguangli2@huawei.com Acked-by: Hanjun Guo guohanjun@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/scsi/huawei/hifc/hifc_api_cmd.c | 1155 ++++++ drivers/scsi/huawei/hifc/hifc_api_cmd.h | 268 ++ drivers/scsi/huawei/hifc/hifc_cfg.c | 823 +++++ drivers/scsi/huawei/hifc/hifc_cfg.h | 171 + drivers/scsi/huawei/hifc/hifc_cmdq.c | 1507 ++++++++ drivers/scsi/huawei/hifc/hifc_cmdq.h | 210 ++ drivers/scsi/huawei/hifc/hifc_cqm_main.c | 694 ++++ drivers/scsi/huawei/hifc/hifc_cqm_main.h | 366 ++ drivers/scsi/huawei/hifc/hifc_cqm_object.c | 3599 +++++++++++++++++++ drivers/scsi/huawei/hifc/hifc_cqm_object.h | 244 ++ drivers/scsi/huawei/hifc/hifc_eqs.c | 1347 +++++++ drivers/scsi/huawei/hifc/hifc_eqs.h | 233 ++ drivers/scsi/huawei/hifc/hifc_hw.h | 611 ++++ drivers/scsi/huawei/hifc/hifc_hwdev.c | 3675 ++++++++++++++++++++ drivers/scsi/huawei/hifc/hifc_hwdev.h | 456 +++ drivers/scsi/huawei/hifc/hifc_hwif.c | 630 ++++ drivers/scsi/huawei/hifc/hifc_hwif.h | 243 ++ drivers/scsi/huawei/hifc/hifc_mgmt.c | 1426 ++++++++ drivers/scsi/huawei/hifc/hifc_mgmt.h | 407 +++ drivers/scsi/huawei/hifc/hifc_sml.c | 361 ++ drivers/scsi/huawei/hifc/hifc_sml.h | 183 + drivers/scsi/huawei/hifc/hifc_wq.c | 624 ++++ drivers/scsi/huawei/hifc/hifc_wq.h | 165 + 23 files changed, 19398 insertions(+) create mode 100644 drivers/scsi/huawei/hifc/hifc_api_cmd.c create mode 100644 drivers/scsi/huawei/hifc/hifc_api_cmd.h create mode 100644 drivers/scsi/huawei/hifc/hifc_cfg.c create mode 100644 drivers/scsi/huawei/hifc/hifc_cfg.h create mode 100644 drivers/scsi/huawei/hifc/hifc_cmdq.c create mode 100644 drivers/scsi/huawei/hifc/hifc_cmdq.h create mode 100644 drivers/scsi/huawei/hifc/hifc_cqm_main.c create mode 100644 drivers/scsi/huawei/hifc/hifc_cqm_main.h create mode 100644 drivers/scsi/huawei/hifc/hifc_cqm_object.c create mode 100644 drivers/scsi/huawei/hifc/hifc_cqm_object.h create mode 100644 drivers/scsi/huawei/hifc/hifc_eqs.c create mode 100644 drivers/scsi/huawei/hifc/hifc_eqs.h create mode 100644 drivers/scsi/huawei/hifc/hifc_hw.h create mode 100644 drivers/scsi/huawei/hifc/hifc_hwdev.c create mode 100644 drivers/scsi/huawei/hifc/hifc_hwdev.h create mode 100644 drivers/scsi/huawei/hifc/hifc_hwif.c create mode 100644 drivers/scsi/huawei/hifc/hifc_hwif.h create mode 100644 drivers/scsi/huawei/hifc/hifc_mgmt.c create mode 100644 drivers/scsi/huawei/hifc/hifc_mgmt.h create mode 100644 drivers/scsi/huawei/hifc/hifc_sml.c create mode 100644 drivers/scsi/huawei/hifc/hifc_sml.h create mode 100644 drivers/scsi/huawei/hifc/hifc_wq.c create mode 100644 drivers/scsi/huawei/hifc/hifc_wq.h
diff --git a/drivers/scsi/huawei/hifc/hifc_api_cmd.c b/drivers/scsi/huawei/hifc/hifc_api_cmd.c new file mode 100644 index 000000000000..22632f779582 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_api_cmd.c @@ -0,0 +1,1155 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/completion.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/semaphore.h> +#include <linux/jiffies.h> +#include <linux/delay.h> + +#include "hifc_knl_adp.h" +#include "hifc_hw.h" +#include "hifc_hwdev.h" +#include "hifc_hwif.h" +#include "hifc_api_cmd.h" + +#define API_CMD_CHAIN_CELL_SIZE_SHIFT 6U + +#define API_CMD_CELL_DESC_SIZE 8 +#define API_CMD_CELL_DATA_ADDR_SIZE 8 + +#define API_CHAIN_NUM_CELLS 32 +#define API_CHAIN_CELL_SIZE 128 +#define API_CHAIN_RSP_DATA_SIZE 128 + +#define API_CMD_CELL_WB_ADDR_SIZE 8 + +#define API_CHAIN_CELL_ALIGNMENT 8 + +#define API_CMD_TIMEOUT 10000 +#define API_CMD_STATUS_TIMEOUT 100000 + +#define API_CMD_BUF_SIZE 2048ULL + +#define API_CMD_NODE_ALIGN_SIZE 512ULL +#define API_PAYLOAD_ALIGN_SIZE 64ULL + +#define API_CHAIN_RESP_ALIGNMENT 64ULL + +#define COMPLETION_TIMEOUT_DEFAULT 1000UL +#define POLLING_COMPLETION_TIMEOUT_DEFAULT 1000U + +#define API_CMD_RESPONSE_DATA_PADDR(val) be64_to_cpu(*((u64 *)(val))) + +#define READ_API_CMD_PRIV_DATA(id, token) (((id) << 16) + (token)) +#define WRITE_API_CMD_PRIV_DATA(id) (((u8)id) << 16) + +#define MASKED_IDX(chain, idx) ((idx) & ((chain)->num_cells - 1)) + +#define SIZE_4BYTES(size) (ALIGN((u32)(size), 4U) >> 2) +#define SIZE_8BYTES(size) (ALIGN((u32)(size), 8U) >> 3) + +enum api_cmd_data_format { + SGL_DATA = 1, +}; + +enum api_cmd_type { + API_CMD_WRITE_TYPE = 0, + API_CMD_READ_TYPE = 1, +}; + +enum api_cmd_bypass { + NOT_BYPASS = 0, + BYPASS = 1, +}; + +enum api_cmd_resp_aeq { + NOT_TRIGGER = 0, + TRIGGER = 1, +}; + +static u8 xor_chksum_set(void *data) +{ + int idx; + u8 checksum = 0; + u8 *val = data; + + for (idx = 0; idx < 7; idx++) + checksum ^= val[idx]; + + return checksum; +} + +static void set_prod_idx(struct hifc_api_cmd_chain *chain) +{ + enum hifc_api_cmd_chain_type chain_type = chain->chain_type; + struct hifc_hwif *hwif = chain->hwdev->hwif; + u32 hw_prod_idx_addr = HIFC_CSR_API_CMD_CHAIN_PI_ADDR(chain_type); + u32 prod_idx = chain->prod_idx; + + hifc_hwif_write_reg(hwif, hw_prod_idx_addr, prod_idx); +} + +static u32 get_hw_cons_idx(struct hifc_api_cmd_chain *chain) +{ + u32 addr, val; + + addr = HIFC_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type); + val = hifc_hwif_read_reg(chain->hwdev->hwif, addr); + + return HIFC_API_CMD_STATUS_GET(val, CONS_IDX); +} + +static void dump_api_chain_reg(struct hifc_api_cmd_chain *chain) +{ + void *dev = chain->hwdev->dev_hdl; + u32 addr, val; + + addr = HIFC_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type); + val = hifc_hwif_read_reg(chain->hwdev->hwif, addr); + + sdk_err(dev, "Chain type: 0x%x, cpld error: 0x%x, check error: 0x%x, current fsm: 0x%x\n", + chain->chain_type, HIFC_API_CMD_STATUS_GET(val, CPLD_ERR), + HIFC_API_CMD_STATUS_GET(val, CHKSUM_ERR), + HIFC_API_CMD_STATUS_GET(val, FSM)); + + sdk_err(dev, "Chain hw current ci: 0x%x\n", + HIFC_API_CMD_STATUS_GET(val, CONS_IDX)); + + addr = HIFC_CSR_API_CMD_CHAIN_PI_ADDR(chain->chain_type); + val = hifc_hwif_read_reg(chain->hwdev->hwif, addr); + sdk_err(dev, 
"Chain hw current pi: 0x%x\n", val); +} + +/** + * chain_busy - check if the chain is still processing last requests + * @chain: chain to check + * Return: 0 - success, negative - failure + **/ +static int chain_busy(struct hifc_api_cmd_chain *chain) +{ + void *dev = chain->hwdev->dev_hdl; + struct hifc_api_cmd_cell_ctxt *ctxt; + u64 resp_header; + + ctxt = &chain->cell_ctxt[chain->prod_idx]; + + switch (chain->chain_type) { + case HIFC_API_CMD_MULTI_READ: + case HIFC_API_CMD_POLL_READ: + resp_header = be64_to_cpu(ctxt->resp->header); + if (ctxt->status && + !HIFC_API_CMD_RESP_HEADER_VALID(resp_header)) { + sdk_err(dev, "Context(0x%x) busy!, pi: %d, resp_header: 0x%08x%08x\n", + ctxt->status, chain->prod_idx, + upper_32_bits(resp_header), + lower_32_bits(resp_header)); + dump_api_chain_reg(chain); + return -EBUSY; + } + break; + case HIFC_API_CMD_POLL_WRITE: + case HIFC_API_CMD_WRITE_TO_MGMT_CPU: + case HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + chain->cons_idx = get_hw_cons_idx(chain); + + if (chain->cons_idx == MASKED_IDX(chain, chain->prod_idx + 1)) { + sdk_err(dev, "API CMD chain %d is busy, cons_idx = %d, prod_idx = %d\n", + chain->chain_type, chain->cons_idx, + chain->prod_idx); + dump_api_chain_reg(chain); + return -EBUSY; + } + break; + default: + sdk_err(dev, "Unknown Chain type %d\n", chain->chain_type); + return -EINVAL; + } + + return 0; +} + +/** + * get_cell_data_size - get the data size of specific cell type + * @type: chain type + * @cmd_size: the command size + * Return: cell_data_size + **/ +static u16 get_cell_data_size(enum hifc_api_cmd_chain_type type, u16 cmd_size) +{ + u16 cell_data_size = 0; + + switch (type) { + case HIFC_API_CMD_POLL_READ: + cell_data_size = ALIGN(API_CMD_CELL_DESC_SIZE + + API_CMD_CELL_WB_ADDR_SIZE + + API_CMD_CELL_DATA_ADDR_SIZE, + API_CHAIN_CELL_ALIGNMENT); + break; + + case HIFC_API_CMD_WRITE_TO_MGMT_CPU: + case HIFC_API_CMD_POLL_WRITE: + case HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + cell_data_size = ALIGN(API_CMD_CELL_DESC_SIZE + + API_CMD_CELL_DATA_ADDR_SIZE, + API_CHAIN_CELL_ALIGNMENT); + break; + default: + break; + } + + return cell_data_size; +} + +/** + * prepare_cell_ctrl - prepare the ctrl of the cell for the command + * @cell_ctrl: the control of the cell to set the control into it + * @cell_len: the size of the cell + **/ +static void prepare_cell_ctrl(u64 *cell_ctrl, u16 cell_len) +{ + u64 ctrl; + u8 chksum; + + ctrl = HIFC_API_CMD_CELL_CTRL_SET(SIZE_8BYTES(cell_len), CELL_LEN) | + HIFC_API_CMD_CELL_CTRL_SET(0ULL, RD_DMA_ATTR_OFF) | + HIFC_API_CMD_CELL_CTRL_SET(0ULL, WR_DMA_ATTR_OFF); + + chksum = xor_chksum_set(&ctrl); + + ctrl |= HIFC_API_CMD_CELL_CTRL_SET(chksum, XOR_CHKSUM); + + /* The data in the HW should be in Big Endian Format */ + *cell_ctrl = cpu_to_be64(ctrl); +} + +/** + * prepare_api_cmd - prepare API CMD command + * @chain: chain for the command + * @cell: the cell of the command + * @dest: destination node on the card that will receive the command + * @cmd: command data + * @cmd_size: the command size + **/ +static void prepare_api_cmd(struct hifc_api_cmd_chain *chain, + struct hifc_api_cmd_cell *cell, + enum hifc_node_id dest, + const void *cmd, u16 cmd_size) +{ + struct hifc_api_cmd_cell_ctxt *cell_ctxt; + u32 priv; + + cell_ctxt = &chain->cell_ctxt[chain->prod_idx]; + + switch (chain->chain_type) { + case HIFC_API_CMD_POLL_READ: + priv = READ_API_CMD_PRIV_DATA(chain->chain_type, + cell_ctxt->saved_prod_idx); + cell->desc = HIFC_API_CMD_DESC_SET(SGL_DATA, API_TYPE) | + HIFC_API_CMD_DESC_SET(API_CMD_READ_TYPE, RD_WR) | 
+ HIFC_API_CMD_DESC_SET(BYPASS, MGMT_BYPASS) | + HIFC_API_CMD_DESC_SET(NOT_TRIGGER, RESP_AEQE_EN) | + HIFC_API_CMD_DESC_SET(priv, PRIV_DATA); + break; + case HIFC_API_CMD_POLL_WRITE: + priv = WRITE_API_CMD_PRIV_DATA(chain->chain_type); + cell->desc = HIFC_API_CMD_DESC_SET(SGL_DATA, API_TYPE) | + HIFC_API_CMD_DESC_SET(API_CMD_WRITE_TYPE, RD_WR) | + HIFC_API_CMD_DESC_SET(BYPASS, MGMT_BYPASS) | + HIFC_API_CMD_DESC_SET(NOT_TRIGGER, RESP_AEQE_EN) | + HIFC_API_CMD_DESC_SET(priv, PRIV_DATA); + break; + case HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + case HIFC_API_CMD_WRITE_TO_MGMT_CPU: + priv = WRITE_API_CMD_PRIV_DATA(chain->chain_type); + cell->desc = HIFC_API_CMD_DESC_SET(SGL_DATA, API_TYPE) | + HIFC_API_CMD_DESC_SET(API_CMD_WRITE_TYPE, RD_WR) | + HIFC_API_CMD_DESC_SET(NOT_BYPASS, MGMT_BYPASS) | + HIFC_API_CMD_DESC_SET(TRIGGER, RESP_AEQE_EN) | + HIFC_API_CMD_DESC_SET(priv, PRIV_DATA); + break; + default: + sdk_err(chain->hwdev->dev_hdl, "Unknown Chain type: %d\n", + chain->chain_type); + return; + } + + cell->desc |= HIFC_API_CMD_DESC_SET(dest, DEST) | + HIFC_API_CMD_DESC_SET(SIZE_4BYTES(cmd_size), SIZE); + + cell->desc |= HIFC_API_CMD_DESC_SET(xor_chksum_set(&cell->desc), + XOR_CHKSUM); + + /* The data in the HW should be in Big Endian Format */ + cell->desc = cpu_to_be64(cell->desc); + + memcpy(cell_ctxt->api_cmd_vaddr, cmd, cmd_size); +} + +/** + * prepare_cell - prepare cell ctrl and cmd in the current producer cell + * @chain: chain for the command + * @dest: destination node on the card that will receive the command + * @cmd: command data + * @cmd_size: the command size + **/ +static void prepare_cell(struct hifc_api_cmd_chain *chain, + enum hifc_node_id dest, + void *cmd, u16 cmd_size) +{ + struct hifc_api_cmd_cell *curr_node; + u16 cell_size; + + curr_node = chain->curr_node; + + cell_size = get_cell_data_size(chain->chain_type, cmd_size); + + prepare_cell_ctrl(&curr_node->ctrl, cell_size); + prepare_api_cmd(chain, curr_node, dest, cmd, cmd_size); +} + +static inline void cmd_chain_prod_idx_inc(struct hifc_api_cmd_chain *chain) +{ + chain->prod_idx = MASKED_IDX(chain, chain->prod_idx + 1); +} + +static void issue_api_cmd(struct hifc_api_cmd_chain *chain) +{ + set_prod_idx(chain); +} + +/** + * api_cmd_status_update - update the status of the chain + * @chain: chain to update + **/ +static void api_cmd_status_update(struct hifc_api_cmd_chain *chain) +{ + struct hifc_api_cmd_status *wb_status; + enum hifc_api_cmd_chain_type chain_type; + u64 status_header; + u32 buf_desc; + + wb_status = chain->wb_status; + + buf_desc = be32_to_cpu(wb_status->buf_desc); + if (HIFC_API_CMD_STATUS_GET(buf_desc, CHKSUM_ERR)) + return; + + status_header = be64_to_cpu(wb_status->header); + chain_type = HIFC_API_CMD_STATUS_HEADER_GET(status_header, CHAIN_ID); + if (chain_type >= HIFC_API_CMD_MAX) + return; + + if (chain_type != chain->chain_type) + return; + + chain->cons_idx = HIFC_API_CMD_STATUS_GET(buf_desc, CONS_IDX); +} + +/** + * wait_for_status_poll - wait for write to mgmt command to complete + * @chain: the chain of the command + * Return: 0 - success, negative - failure + **/ +static int wait_for_status_poll(struct hifc_api_cmd_chain *chain) +{ + int err = -ETIMEDOUT; + u32 cnt = 0; + + while (cnt < API_CMD_STATUS_TIMEOUT && + chain->hwdev->chip_present_flag) { + api_cmd_status_update(chain); + + /* SYNC API CMD cmd should start after prev cmd finished */ + if (chain->cons_idx == chain->prod_idx) { + err = 0; + break; + } + + usleep_range(50, 100); + cnt++; + } + + return err; +} + +static void 
copy_resp_data(struct hifc_api_cmd_cell_ctxt *ctxt, void *ack, + u16 ack_size) +{ + struct hifc_api_cmd_resp_fmt *resp = ctxt->resp; + + memcpy(ack, &resp->resp_data, ack_size); + ctxt->status = 0; +} + +/** + * prepare_cell - polling for respense data of the read api-command + * @chain: pointer to api cmd chain + * + * Return: 0 - success, negative - failure + **/ +static int wait_for_resp_polling(struct hifc_api_cmd_cell_ctxt *ctxt) +{ + u64 resp_header; + int ret = -ETIMEDOUT; + u32 cnt = 0; + + while (cnt < POLLING_COMPLETION_TIMEOUT_DEFAULT) { + resp_header = be64_to_cpu(ctxt->resp->header); + + rmb(); /* read the latest header */ + + if (HIFC_API_CMD_RESP_HEADER_VALID(resp_header)) { + ret = 0; + break; + } + usleep_range(100, 1000); + cnt++; + } + + if (ret) + pr_err("Wait for api chain response timeout\n"); + + return ret; +} + +/** + * wait_for_api_cmd_completion - wait for command to complete + * @chain: chain for the command + * Return: 0 - success, negative - failure + **/ +static int wait_for_api_cmd_completion(struct hifc_api_cmd_chain *chain, + struct hifc_api_cmd_cell_ctxt *ctxt, + void *ack, u16 ack_size) +{ + void *dev = chain->hwdev->dev_hdl; + int err = 0; + + switch (chain->chain_type) { + case HIFC_API_CMD_POLL_READ: + err = wait_for_resp_polling(ctxt); + if (!err) + copy_resp_data(ctxt, ack, ack_size); + break; + case HIFC_API_CMD_POLL_WRITE: + case HIFC_API_CMD_WRITE_TO_MGMT_CPU: + err = wait_for_status_poll(chain); + if (err) { + sdk_err(dev, "API CMD Poll status timeout, chain type: %d\n", + chain->chain_type); + break; + } + break; + case HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + /* No need to wait */ + break; + default: + sdk_err(dev, "Unknown API CMD Chain type: %d\n", + chain->chain_type); + err = -EINVAL; + break; + } + + if (err) + dump_api_chain_reg(chain); + + return err; +} + +static inline void update_api_cmd_ctxt(struct hifc_api_cmd_chain *chain, + struct hifc_api_cmd_cell_ctxt *ctxt) +{ + ctxt->status = 1; + ctxt->saved_prod_idx = chain->prod_idx; + if (ctxt->resp) { + ctxt->resp->header = 0; + + /* make sure "header" was cleared */ + wmb(); + } +} + +/** + * api_cmd - API CMD command + * @chain: chain for the command + * @dest: destination node on the card that will receive the command + * @cmd: command data + * @size: the command size + * Return: 0 - success, negative - failure + **/ +static int api_cmd(struct hifc_api_cmd_chain *chain, + enum hifc_node_id dest, + void *cmd, u16 cmd_size, void *ack, u16 ack_size) +{ + struct hifc_api_cmd_cell_ctxt *ctxt; + + if (chain->chain_type == HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) + spin_lock(&chain->async_lock); + else + down(&chain->sem); + ctxt = &chain->cell_ctxt[chain->prod_idx]; + if (chain_busy(chain)) { + if (chain->chain_type == HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) + spin_unlock(&chain->async_lock); + else + up(&chain->sem); + return -EBUSY; + } + update_api_cmd_ctxt(chain, ctxt); + + prepare_cell(chain, dest, cmd, cmd_size); + + cmd_chain_prod_idx_inc(chain); + + wmb(); /* issue the command */ + + issue_api_cmd(chain); + + /* incremented prod idx, update ctxt */ + + chain->curr_node = chain->cell_ctxt[chain->prod_idx].cell_vaddr; + if (chain->chain_type == HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) + spin_unlock(&chain->async_lock); + else + up(&chain->sem); + + return wait_for_api_cmd_completion(chain, ctxt, ack, ack_size); +} + +/** + * hifc_api_cmd_write - Write API CMD command + * @chain: chain for write command + * @dest: destination node on the card that will receive the command + * @cmd: command data 
+ * @size: the command size + * Return: 0 - success, negative - failure + **/ +int hifc_api_cmd_write(struct hifc_api_cmd_chain *chain, + enum hifc_node_id dest, void *cmd, u16 size) +{ + /* Verify the chain type */ + return api_cmd(chain, dest, cmd, size, NULL, 0); +} + +int hifc_api_cmd_read(struct hifc_api_cmd_chain *chain, + enum hifc_node_id dest, + void *cmd, u16 size, void *ack, u16 ack_size) +{ + return api_cmd(chain, dest, cmd, size, ack, ack_size); +} + +/** + * api_cmd_hw_restart - restart the chain in the HW + * @chain: the API CMD specific chain to restart + **/ +static int api_cmd_hw_restart(struct hifc_api_cmd_chain *cmd_chain) +{ + struct hifc_hwif *hwif = cmd_chain->hwdev->hwif; + u32 reg_addr, val; + int err; + u32 cnt = 0; + + /* Read Modify Write */ + reg_addr = HIFC_CSR_API_CMD_CHAIN_REQ_ADDR(cmd_chain->chain_type); + val = hifc_hwif_read_reg(hwif, reg_addr); + + val = HIFC_API_CMD_CHAIN_REQ_CLEAR(val, RESTART); + val |= HIFC_API_CMD_CHAIN_REQ_SET(1, RESTART); + + hifc_hwif_write_reg(hwif, reg_addr, val); + + err = -ETIMEDOUT; + while (cnt < API_CMD_TIMEOUT) { + val = hifc_hwif_read_reg(hwif, reg_addr); + + if (!HIFC_API_CMD_CHAIN_REQ_GET(val, RESTART)) { + err = 0; + break; + } + + usleep_range(900, 1000); + cnt++; + } + + return err; +} + +/** + * api_cmd_ctrl_init - set the control register of a chain + * @chain: the API CMD specific chain to set control register for + **/ +static void api_cmd_ctrl_init(struct hifc_api_cmd_chain *chain) +{ + struct hifc_hwif *hwif = chain->hwdev->hwif; + u32 reg_addr, ctrl; + u32 size; + + /* Read Modify Write */ + reg_addr = HIFC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type); + + size = (u32)ilog2(chain->cell_size >> API_CMD_CHAIN_CELL_SIZE_SHIFT); + + ctrl = hifc_hwif_read_reg(hwif, reg_addr); + + ctrl = HIFC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) & + HIFC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE); + + ctrl |= HIFC_API_CMD_CHAIN_CTRL_SET(0, AEQE_EN) | + HIFC_API_CMD_CHAIN_CTRL_SET(size, CELL_SIZE); + + hifc_hwif_write_reg(hwif, reg_addr, ctrl); +} + +/** + * api_cmd_set_status_addr - set the status address of a chain in the HW + * @chain: the API CMD specific chain to set status address for + **/ +static void api_cmd_set_status_addr(struct hifc_api_cmd_chain *chain) +{ + struct hifc_hwif *hwif = chain->hwdev->hwif; + u32 addr, val; + + addr = HIFC_CSR_API_CMD_STATUS_HI_ADDR(chain->chain_type); + val = upper_32_bits(chain->wb_status_paddr); + hifc_hwif_write_reg(hwif, addr, val); + + addr = HIFC_CSR_API_CMD_STATUS_LO_ADDR(chain->chain_type); + val = lower_32_bits(chain->wb_status_paddr); + hifc_hwif_write_reg(hwif, addr, val); +} + +/** + * api_cmd_set_num_cells - set the number cells of a chain in the HW + * @chain: the API CMD specific chain to set the number of cells for + **/ +static void api_cmd_set_num_cells(struct hifc_api_cmd_chain *chain) +{ + struct hifc_hwif *hwif = chain->hwdev->hwif; + u32 addr, val; + + addr = HIFC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(chain->chain_type); + val = chain->num_cells; + hifc_hwif_write_reg(hwif, addr, val); +} + +/** + * api_cmd_head_init - set the head cell of a chain in the HW + * @chain: the API CMD specific chain to set the head for + **/ +static void api_cmd_head_init(struct hifc_api_cmd_chain *chain) +{ + struct hifc_hwif *hwif = chain->hwdev->hwif; + u32 addr, val; + + addr = HIFC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(chain->chain_type); + val = upper_32_bits(chain->head_cell_paddr); + hifc_hwif_write_reg(hwif, addr, val); + + addr = HIFC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(chain->chain_type); + 
val = lower_32_bits(chain->head_cell_paddr); + hifc_hwif_write_reg(hwif, addr, val); +} + +/** + * wait_for_ready_chain - wait for the chain to be ready + * @chain: the API CMD specific chain to wait for + * Return: 0 - success, negative - failure + **/ +static int wait_for_ready_chain(struct hifc_api_cmd_chain *chain) +{ + struct hifc_hwif *hwif = chain->hwdev->hwif; + u32 addr, val; + u32 hw_cons_idx; + u32 cnt = 0; + int err; + + addr = HIFC_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type); + err = -ETIMEDOUT; + while (cnt < API_CMD_TIMEOUT) { + val = hifc_hwif_read_reg(hwif, addr); + hw_cons_idx = HIFC_API_CMD_STATUS_GET(val, CONS_IDX); + + /* wait for HW cons idx to be updated */ + if (hw_cons_idx == chain->cons_idx) { + err = 0; + break; + } + + usleep_range(900, 1000); + cnt++; + } + + return err; +} + +/** + * api_cmd_chain_hw_clean - clean the HW + * @chain: the API CMD specific chain + **/ +static void api_cmd_chain_hw_clean(struct hifc_api_cmd_chain *chain) +{ + struct hifc_hwif *hwif = chain->hwdev->hwif; + u32 addr, ctrl; + + addr = HIFC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type); + + ctrl = hifc_hwif_read_reg(hwif, addr); + ctrl = HIFC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, RESTART_EN) & + HIFC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_ERR) & + HIFC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) & + HIFC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_CHK_EN) & + HIFC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE); + + hifc_hwif_write_reg(hwif, addr, ctrl); +} + +/** + * api_cmd_chain_hw_init - initialize the chain in the HW + * @chain: the API CMD specific chain to initialize in HW + * Return: 0 - success, negative - failure + **/ +static int api_cmd_chain_hw_init(struct hifc_api_cmd_chain *chain) +{ + api_cmd_chain_hw_clean(chain); + + api_cmd_set_status_addr(chain); + + if (api_cmd_hw_restart(chain)) { + sdk_err(chain->hwdev->dev_hdl, "Failed to restart api_cmd_hw\n"); + return -EBUSY; + } + + api_cmd_ctrl_init(chain); + api_cmd_set_num_cells(chain); + api_cmd_head_init(chain); + + return wait_for_ready_chain(chain); +} + +/** + * alloc_cmd_buf - allocate a dma buffer for API CMD command + * @chain: the API CMD specific chain for the cmd + * @cell: the cell in the HW for the cmd + * @cell_idx: the index of the cell + * Return: 0 - success, negative - failure + **/ +static int alloc_cmd_buf(struct hifc_api_cmd_chain *chain, + struct hifc_api_cmd_cell *cell, u32 cell_idx) +{ + struct hifc_api_cmd_cell_ctxt *cell_ctxt; + void *dev = chain->hwdev->dev_hdl; + void *buf_vaddr; + u64 buf_paddr; + int err = 0; + + buf_vaddr = (u8 *)((u64)chain->buf_vaddr_base + + chain->buf_size_align * cell_idx); + buf_paddr = chain->buf_paddr_base + + chain->buf_size_align * cell_idx; + + cell_ctxt = &chain->cell_ctxt[cell_idx]; + + cell_ctxt->api_cmd_vaddr = buf_vaddr; + + /* set the cmd DMA address in the cell */ + switch (chain->chain_type) { + case HIFC_API_CMD_POLL_READ: + cell->read.hw_cmd_paddr = cpu_to_be64(buf_paddr); + break; + case HIFC_API_CMD_WRITE_TO_MGMT_CPU: + case HIFC_API_CMD_POLL_WRITE: + case HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + /* The data in the HW should be in Big Endian Format */ + cell->write.hw_cmd_paddr = cpu_to_be64(buf_paddr); + break; + default: + sdk_err(dev, "Unknown API CMD Chain type: %d\n", + chain->chain_type); + err = -EINVAL; + break; + } + + return err; +} + +static void alloc_resp_buf(struct hifc_api_cmd_chain *chain, + struct hifc_api_cmd_cell *cell, u32 cell_idx) +{ + struct hifc_api_cmd_cell_ctxt *cell_ctxt; + void *resp_vaddr; + u64 resp_paddr; + + resp_vaddr = (u8 
*)((u64)chain->rsp_vaddr_base + + chain->rsp_size_align * cell_idx); + resp_paddr = chain->rsp_paddr_base + + chain->rsp_size_align * cell_idx; + + cell_ctxt = &chain->cell_ctxt[cell_idx]; + + cell_ctxt->resp = resp_vaddr; + cell->read.hw_wb_resp_paddr = cpu_to_be64(resp_paddr); +} + +static int hifc_alloc_api_cmd_cell_buf(struct hifc_api_cmd_chain *chain, + u32 cell_idx, + struct hifc_api_cmd_cell *node) +{ + void *dev = chain->hwdev->dev_hdl; + int err; + + /* For read chain, we should allocate buffer for the response data */ + if (chain->chain_type == HIFC_API_CMD_MULTI_READ || + chain->chain_type == HIFC_API_CMD_POLL_READ) + alloc_resp_buf(chain, node, cell_idx); + + switch (chain->chain_type) { + case HIFC_API_CMD_WRITE_TO_MGMT_CPU: + case HIFC_API_CMD_POLL_WRITE: + case HIFC_API_CMD_POLL_READ: + case HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + err = alloc_cmd_buf(chain, node, cell_idx); + if (err) { + sdk_err(dev, "Failed to allocate cmd buffer\n"); + goto alloc_cmd_buf_err; + } + break; + /* For api command write and api command read, the data section + * is directly inserted in the cell, so no need to allocate. + */ + case HIFC_API_CMD_MULTI_READ: + chain->cell_ctxt[cell_idx].api_cmd_vaddr = + &node->read.hw_cmd_paddr; + break; + default: + sdk_err(dev, "Unsupported API CMD chain type\n"); + err = -EINVAL; + goto alloc_cmd_buf_err; + } + + return 0; + +alloc_cmd_buf_err: + + return err; +} + +/** + * api_cmd_create_cell - create API CMD cell of specific chain + * @chain: the API CMD specific chain to create its cell + * @cell_idx: the cell index to create + * @pre_node: previous cell + * @node_vaddr: the virt addr of the cell + * Return: 0 - success, negative - failure + **/ +static int api_cmd_create_cell(struct hifc_api_cmd_chain *chain, u32 cell_idx, + struct hifc_api_cmd_cell *pre_node, + struct hifc_api_cmd_cell **node_vaddr) +{ + struct hifc_api_cmd_cell_ctxt *cell_ctxt; + struct hifc_api_cmd_cell *node; + void *cell_vaddr; + u64 cell_paddr; + int err; + + cell_vaddr = (void *)((u64)chain->cell_vaddr_base + + chain->cell_size_align * cell_idx); + cell_paddr = chain->cell_paddr_base + + chain->cell_size_align * cell_idx; + + cell_ctxt = &chain->cell_ctxt[cell_idx]; + cell_ctxt->cell_vaddr = cell_vaddr; + node = cell_ctxt->cell_vaddr; + + if (!pre_node) { + chain->head_node = cell_vaddr; + chain->head_cell_paddr = cell_paddr; + } else { + /* The data in the HW should be in Big Endian Format */ + pre_node->next_cell_paddr = cpu_to_be64(cell_paddr); + } + + /* Driver software should make sure that there is an empty API + * command cell at the end the chain + */ + node->next_cell_paddr = 0; + + err = hifc_alloc_api_cmd_cell_buf(chain, cell_idx, node); + if (err) + return err; + + *node_vaddr = node; + + return 0; +} + +/** + * api_cmd_create_cells - create API CMD cells for specific chain + * @chain: the API CMD specific chain + * Return: 0 - success, negative - failure + **/ +static int api_cmd_create_cells(struct hifc_api_cmd_chain *chain) +{ + struct hifc_api_cmd_cell *node = NULL, *pre_node = NULL; + void *dev = chain->hwdev->dev_hdl; + u32 cell_idx; + int err; + + for (cell_idx = 0; cell_idx < chain->num_cells; cell_idx++) { + err = api_cmd_create_cell(chain, cell_idx, pre_node, &node); + if (err) { + sdk_err(dev, "Failed to create API CMD cell\n"); + return err; + } + + pre_node = node; + } + + if (!node) + return -EFAULT; + + /* set the Final node to point on the start */ + node->next_cell_paddr = cpu_to_be64(chain->head_cell_paddr); + + /* set the current node to be the head 
*/ + chain->curr_node = chain->head_node; + return 0; +} + +/** + * api_chain_init - initialize API CMD specific chain + * @chain: the API CMD specific chain to initialize + * @attr: attributes to set in the chain + * Return: 0 - success, negative - failure + **/ +static int api_chain_init(struct hifc_api_cmd_chain *chain, + struct hifc_api_cmd_chain_attr *attr) +{ + void *dev = chain->hwdev->dev_hdl; + size_t cell_ctxt_size; + size_t cells_buf_size; + int err; + + chain->chain_type = attr->chain_type; + chain->num_cells = attr->num_cells; + chain->cell_size = attr->cell_size; + chain->rsp_size = attr->rsp_size; + + chain->prod_idx = 0; + chain->cons_idx = 0; + + if (chain->chain_type == HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) + spin_lock_init(&chain->async_lock); + else + sema_init(&chain->sem, 1); + + cell_ctxt_size = chain->num_cells * sizeof(*chain->cell_ctxt); + if (!cell_ctxt_size) { + sdk_err(dev, "Api chain cell size cannot be zero\n"); + err = -EINVAL; + goto alloc_cell_ctxt_err; + } + + chain->cell_ctxt = kzalloc(cell_ctxt_size, GFP_KERNEL); + if (!chain->cell_ctxt) { + sdk_err(dev, "Failed to allocate cell contexts for a chain\n"); + err = -ENOMEM; + goto alloc_cell_ctxt_err; + } + + chain->wb_status = dma_zalloc_coherent(dev, + sizeof(*chain->wb_status), + &chain->wb_status_paddr, + GFP_KERNEL); + if (!chain->wb_status) { + sdk_err(dev, "Failed to allocate DMA wb status\n"); + err = -ENOMEM; + goto alloc_wb_status_err; + } + + chain->cell_size_align = ALIGN((u64)chain->cell_size, + API_CMD_NODE_ALIGN_SIZE); + chain->rsp_size_align = ALIGN((u64)chain->rsp_size, + API_CHAIN_RESP_ALIGNMENT); + chain->buf_size_align = ALIGN(API_CMD_BUF_SIZE, API_PAYLOAD_ALIGN_SIZE); + + cells_buf_size = (chain->cell_size_align + chain->rsp_size_align + + chain->buf_size_align) * chain->num_cells; + + err = hifc_dma_zalloc_coherent_align(dev, cells_buf_size, + API_CMD_NODE_ALIGN_SIZE, + GFP_KERNEL, + &chain->cells_addr); + if (err) { + sdk_err(dev, "Failed to allocate API CMD cells buffer\n"); + goto alloc_cells_buf_err; + } + + chain->cell_vaddr_base = chain->cells_addr.align_vaddr; + chain->cell_paddr_base = chain->cells_addr.align_paddr; + + chain->rsp_vaddr_base = (u8 *)((u64)chain->cell_vaddr_base + + chain->cell_size_align * chain->num_cells); + chain->rsp_paddr_base = chain->cell_paddr_base + + chain->cell_size_align * chain->num_cells; + + chain->buf_vaddr_base = (u8 *)((u64)chain->rsp_vaddr_base + + chain->rsp_size_align * chain->num_cells); + chain->buf_paddr_base = chain->rsp_paddr_base + + chain->rsp_size_align * chain->num_cells; + + return 0; + +alloc_cells_buf_err: + dma_free_coherent(dev, sizeof(*chain->wb_status), + chain->wb_status, chain->wb_status_paddr); + +alloc_wb_status_err: + kfree(chain->cell_ctxt); + +alloc_cell_ctxt_err: + return err; +} + +/** + * api_chain_free - free API CMD specific chain + * @chain: the API CMD specific chain to free + **/ +static void api_chain_free(struct hifc_api_cmd_chain *chain) +{ + void *dev = chain->hwdev->dev_hdl; + + hifc_dma_free_coherent_align(dev, &chain->cells_addr); + + dma_free_coherent(dev, sizeof(*chain->wb_status), + chain->wb_status, chain->wb_status_paddr); + kfree(chain->cell_ctxt); +} + +/** + * api_cmd_create_chain - create API CMD specific chain + * @chain: the API CMD specific chain to create + * @attr: attributes to set in the chain + * Return: 0 - success, negative - failure + **/ +static int api_cmd_create_chain(struct hifc_api_cmd_chain **cmd_chain, + struct hifc_api_cmd_chain_attr *attr) +{ + struct hifc_hwdev *hwdev = 
attr->hwdev; + struct hifc_api_cmd_chain *chain; + int err; + + if (attr->num_cells & (attr->num_cells - 1)) { + sdk_err(hwdev->dev_hdl, "Invalid number of cells, must be power of 2\n"); + return -EINVAL; + } + + chain = kzalloc(sizeof(*chain), GFP_KERNEL); + if (!chain) + return -ENOMEM; + + chain->hwdev = hwdev; + + err = api_chain_init(chain, attr); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to initialize chain\n"); + goto chain_init_err; + } + + err = api_cmd_create_cells(chain); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to create cells for API CMD chain\n"); + goto create_cells_err; + } + + err = api_cmd_chain_hw_init(chain); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to initialize chain HW\n"); + goto chain_hw_init_err; + } + + *cmd_chain = chain; + return 0; + +chain_hw_init_err: +create_cells_err: + api_chain_free(chain); + +chain_init_err: + kfree(chain); + return err; +} + +/** + * api_cmd_destroy_chain - destroy API CMD specific chain + * @chain: the API CMD specific chain to destroy + **/ +static void api_cmd_destroy_chain(struct hifc_api_cmd_chain *chain) +{ + api_chain_free(chain); + kfree(chain); +} + +/** + * hifc_api_cmd_init - Initialize all the API CMD chains + * @hwdev: the pointer to hw device + * @chain: the API CMD chains that will be initialized + * Return: 0 - success, negative - failure + **/ +int hifc_api_cmd_init(struct hifc_hwdev *hwdev, + struct hifc_api_cmd_chain **chain) +{ + void *dev = hwdev->dev_hdl; + struct hifc_api_cmd_chain_attr attr; + enum hifc_api_cmd_chain_type chain_type, i; + int err; + + attr.hwdev = hwdev; + attr.num_cells = API_CHAIN_NUM_CELLS; + attr.cell_size = API_CHAIN_CELL_SIZE; + attr.rsp_size = API_CHAIN_RSP_DATA_SIZE; + + chain_type = HIFC_API_CMD_WRITE_TO_MGMT_CPU; + for (; chain_type < HIFC_API_CMD_MAX; chain_type++) { + attr.chain_type = chain_type; + + err = api_cmd_create_chain(&chain[chain_type], &attr); + if (err) { + sdk_err(dev, "Failed to create chain %d\n", chain_type); + goto create_chain_err; + } + } + + return 0; + +create_chain_err: + i = HIFC_API_CMD_WRITE_TO_MGMT_CPU; + for (; i < chain_type; i++) + api_cmd_destroy_chain(chain[i]); + + return err; +} + +/** + * hifc_api_cmd_free - free the API CMD chains + * @chain: the API CMD chains that will be freed + **/ +void hifc_api_cmd_free(struct hifc_api_cmd_chain **chain) +{ + enum hifc_api_cmd_chain_type chain_type; + + chain_type = HIFC_API_CMD_WRITE_TO_MGMT_CPU; + + for (; chain_type < HIFC_API_CMD_MAX; chain_type++) + api_cmd_destroy_chain(chain[chain_type]); +} + diff --git a/drivers/scsi/huawei/hifc/hifc_api_cmd.h b/drivers/scsi/huawei/hifc/hifc_api_cmd.h new file mode 100644 index 000000000000..bd14db34a119 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_api_cmd.h @@ -0,0 +1,268 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef HIFC_API_CMD_H_ +#define HIFC_API_CMD_H_ + +#define HIFC_API_CMD_CELL_CTRL_CELL_LEN_SHIFT 0 +#define HIFC_API_CMD_CELL_CTRL_RD_DMA_ATTR_OFF_SHIFT 16 +#define HIFC_API_CMD_CELL_CTRL_WR_DMA_ATTR_OFF_SHIFT 24 +#define HIFC_API_CMD_CELL_CTRL_XOR_CHKSUM_SHIFT 56 + +#define HIFC_API_CMD_CELL_CTRL_CELL_LEN_MASK 0x3FU +#define HIFC_API_CMD_CELL_CTRL_RD_DMA_ATTR_OFF_MASK 0x3FU +#define HIFC_API_CMD_CELL_CTRL_WR_DMA_ATTR_OFF_MASK 0x3FU +#define HIFC_API_CMD_CELL_CTRL_XOR_CHKSUM_MASK 0xFFU + +#define HIFC_API_CMD_CELL_CTRL_SET(val, member) \ + ((((u64)val) & HIFC_API_CMD_CELL_CTRL_##member##_MASK) << \ + 
HIFC_API_CMD_CELL_CTRL_##member##_SHIFT) + +#define HIFC_API_CMD_DESC_API_TYPE_SHIFT 0 +#define HIFC_API_CMD_DESC_RD_WR_SHIFT 1 +#define HIFC_API_CMD_DESC_MGMT_BYPASS_SHIFT 2 +#define HIFC_API_CMD_DESC_RESP_AEQE_EN_SHIFT 3 +#define HIFC_API_CMD_DESC_PRIV_DATA_SHIFT 8 +#define HIFC_API_CMD_DESC_DEST_SHIFT 32 +#define HIFC_API_CMD_DESC_SIZE_SHIFT 40 +#define HIFC_API_CMD_DESC_XOR_CHKSUM_SHIFT 56 + +#define HIFC_API_CMD_DESC_API_TYPE_MASK 0x1U +#define HIFC_API_CMD_DESC_RD_WR_MASK 0x1U +#define HIFC_API_CMD_DESC_MGMT_BYPASS_MASK 0x1U +#define HIFC_API_CMD_DESC_RESP_AEQE_EN_MASK 0x1U +#define HIFC_API_CMD_DESC_DEST_MASK 0x1FU +#define HIFC_API_CMD_DESC_SIZE_MASK 0x7FFU +#define HIFC_API_CMD_DESC_XOR_CHKSUM_MASK 0xFFU +#define HIFC_API_CMD_DESC_PRIV_DATA_MASK 0xFFFFFFU + +#define HIFC_API_CMD_DESC_SET(val, member) \ + ((((u64)val) & HIFC_API_CMD_DESC_##member##_MASK) << \ + HIFC_API_CMD_DESC_##member##_SHIFT) +#define HIFC_API_CMD_STATUS_HEADER_VALID_SHIFT 0 +#define HIFC_API_CMD_STATUS_HEADER_CHAIN_ID_SHIFT 16 + +#define HIFC_API_CMD_STATUS_HEADER_VALID_MASK 0xFFU +#define HIFC_API_CMD_STATUS_HEADER_CHAIN_ID_MASK 0xFFU +#define HIFC_API_CMD_STATUS_HEADER_GET(val, member) \ + (((val) >> HIFC_API_CMD_STATUS_HEADER_##member##_SHIFT) & \ + HIFC_API_CMD_STATUS_HEADER_##member##_MASK) +#define HIFC_API_CMD_CHAIN_REQ_RESTART_SHIFT 1 +#define HIFC_API_CMD_CHAIN_REQ_RESTART_MASK 0x1U +#define HIFC_API_CMD_CHAIN_REQ_WB_TRIGGER_MASK 0x1U +#define HIFC_API_CMD_CHAIN_REQ_SET(val, member) \ + (((val) & HIFC_API_CMD_CHAIN_REQ_##member##_MASK) << \ + HIFC_API_CMD_CHAIN_REQ_##member##_SHIFT) + +#define HIFC_API_CMD_CHAIN_REQ_GET(val, member) \ + (((val) >> HIFC_API_CMD_CHAIN_REQ_##member##_SHIFT) & \ + HIFC_API_CMD_CHAIN_REQ_##member##_MASK) + +#define HIFC_API_CMD_CHAIN_REQ_CLEAR(val, member) \ + ((val) & (~(HIFC_API_CMD_CHAIN_REQ_##member##_MASK \ + << HIFC_API_CMD_CHAIN_REQ_##member##_SHIFT))) + +#define HIFC_API_CMD_CHAIN_CTRL_RESTART_EN_SHIFT 1 +#define HIFC_API_CMD_CHAIN_CTRL_XOR_ERR_SHIFT 2 +#define HIFC_API_CMD_CHAIN_CTRL_AEQE_EN_SHIFT 4 +#define HIFC_API_CMD_CHAIN_CTRL_AEQ_ID_SHIFT 8 +#define HIFC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_SHIFT 28 +#define HIFC_API_CMD_CHAIN_CTRL_CELL_SIZE_SHIFT 30 + +#define HIFC_API_CMD_CHAIN_CTRL_RESTART_EN_MASK 0x1U +#define HIFC_API_CMD_CHAIN_CTRL_XOR_ERR_MASK 0x1U +#define HIFC_API_CMD_CHAIN_CTRL_AEQE_EN_MASK 0x1U +#define HIFC_API_CMD_CHAIN_CTRL_AEQ_ID_MASK 0x3U +#define HIFC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_MASK 0x3U +#define HIFC_API_CMD_CHAIN_CTRL_CELL_SIZE_MASK 0x3U + +#define HIFC_API_CMD_CHAIN_CTRL_SET(val, member) \ + (((val) & HIFC_API_CMD_CHAIN_CTRL_##member##_MASK) << \ + HIFC_API_CMD_CHAIN_CTRL_##member##_SHIFT) + +#define HIFC_API_CMD_CHAIN_CTRL_CLEAR(val, member) \ + ((val) & (~(HIFC_API_CMD_CHAIN_CTRL_##member##_MASK \ + << HIFC_API_CMD_CHAIN_CTRL_##member##_SHIFT))) + +#define HIFC_API_CMD_RESP_HEAD_VALID_MASK 0xFF +#define HIFC_API_CMD_RESP_HEAD_VALID_CODE 0xFF + +#define HIFC_API_CMD_RESP_HEADER_VALID(val) \ + (((val) & HIFC_API_CMD_RESP_HEAD_VALID_MASK) == \ + HIFC_API_CMD_RESP_HEAD_VALID_CODE) +#define HIFC_API_CMD_STATUS_CONS_IDX_MASK 0xFFFFFFU +#define HIFC_API_CMD_STATUS_CONS_IDX_SHIFT 0 +#define HIFC_API_CMD_STATUS_FSM_MASK 0xFU +#define HIFC_API_CMD_STATUS_FSM_SHIFT 24 +#define HIFC_API_CMD_STATUS_CHKSUM_ERR_MASK 0x3U +#define HIFC_API_CMD_STATUS_CHKSUM_ERR_SHIFT 28 +#define HIFC_API_CMD_STATUS_CPLD_ERR_MASK 0x1U +#define HIFC_API_CMD_STATUS_CPLD_ERR_SHIFT 30 + +#define HIFC_API_CMD_STATUS_GET(val, member) \ + (((val) >> 
HIFC_API_CMD_STATUS_##member##_SHIFT) & \ + HIFC_API_CMD_STATUS_##member##_MASK) + +/* API CMD registers */ +#define HIFC_CSR_API_CMD_BASE 0xF000 + +#define HIFC_CSR_API_CMD_STRIDE 0x100 + +#define HIFC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(idx) \ + (HIFC_CSR_API_CMD_BASE + 0x0 + (idx) * HIFC_CSR_API_CMD_STRIDE) + +#define HIFC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(idx) \ + (HIFC_CSR_API_CMD_BASE + 0x4 + (idx) * HIFC_CSR_API_CMD_STRIDE) + +#define HIFC_CSR_API_CMD_STATUS_HI_ADDR(idx) \ + (HIFC_CSR_API_CMD_BASE + 0x8 + (idx) * HIFC_CSR_API_CMD_STRIDE) + +#define HIFC_CSR_API_CMD_STATUS_LO_ADDR(idx) \ + (HIFC_CSR_API_CMD_BASE + 0xC + (idx) * HIFC_CSR_API_CMD_STRIDE) + +#define HIFC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(idx) \ + (HIFC_CSR_API_CMD_BASE + 0x10 + (idx) * HIFC_CSR_API_CMD_STRIDE) + +#define HIFC_CSR_API_CMD_CHAIN_CTRL_ADDR(idx) \ + (HIFC_CSR_API_CMD_BASE + 0x14 + (idx) * HIFC_CSR_API_CMD_STRIDE) + +#define HIFC_CSR_API_CMD_CHAIN_PI_ADDR(idx) \ + (HIFC_CSR_API_CMD_BASE + 0x1C + (idx) * HIFC_CSR_API_CMD_STRIDE) + +#define HIFC_CSR_API_CMD_CHAIN_REQ_ADDR(idx) \ + (HIFC_CSR_API_CMD_BASE + 0x20 + (idx) * HIFC_CSR_API_CMD_STRIDE) + +#define HIFC_CSR_API_CMD_STATUS_0_ADDR(idx) \ + (HIFC_CSR_API_CMD_BASE + 0x30 + (idx) * HIFC_CSR_API_CMD_STRIDE) + +enum hifc_api_cmd_chain_type { + /* write command with completion notification */ + HIFC_API_CMD_WRITE = 0, + /* read command with completion notification */ + HIFC_API_CMD_READ = 1, + /* write to mgmt cpu command with completion */ + HIFC_API_CMD_WRITE_TO_MGMT_CPU = 2, + /* multi read command with completion notification - not used */ + HIFC_API_CMD_MULTI_READ = 3, + /* write command without completion notification */ + HIFC_API_CMD_POLL_WRITE = 4, + /* read command without completion notification */ + HIFC_API_CMD_POLL_READ = 5, + /* read from mgmt cpu command with completion */ + HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU = 6, + HIFC_API_CMD_MAX, +}; + +struct hifc_api_cmd_status { + u64 header; + u32 buf_desc; + u32 cell_addr_hi; + u32 cell_addr_lo; + u32 rsvd0; + u64 rsvd1; +}; + +/* HW struct */ +struct hifc_api_cmd_cell { + u64 ctrl; + + /* address is 64 bit in HW struct */ + u64 next_cell_paddr; + + u64 desc; + + /* HW struct */ + union { + struct { + u64 hw_cmd_paddr; + } write; + + struct { + u64 hw_wb_resp_paddr; + u64 hw_cmd_paddr; + } read; + }; +}; + +struct hifc_api_cmd_resp_fmt { + u64 header; + u64 rsvd[3]; + u64 resp_data; +}; + +struct hifc_api_cmd_cell_ctxt { + struct hifc_api_cmd_cell *cell_vaddr; + + void *api_cmd_vaddr; + + struct hifc_api_cmd_resp_fmt *resp; + + struct completion done; + int status; + + u32 saved_prod_idx; +}; + +struct hifc_api_cmd_chain_attr { + struct hifc_hwdev *hwdev; + enum hifc_api_cmd_chain_type chain_type; + + u32 num_cells; + u16 rsp_size; + u16 cell_size; +}; + +struct hifc_api_cmd_chain { + struct hifc_hwdev *hwdev; + enum hifc_api_cmd_chain_type chain_type; + + u32 num_cells; + u16 cell_size; + u16 rsp_size; + + /* HW members is 24 bit format */ + u32 prod_idx; + u32 cons_idx; + + struct semaphore sem; + /* Async cmd can not be scheduling */ + spinlock_t async_lock; + + dma_addr_t wb_status_paddr; + struct hifc_api_cmd_status *wb_status; + + dma_addr_t head_cell_paddr; + struct hifc_api_cmd_cell *head_node; + + struct hifc_api_cmd_cell_ctxt *cell_ctxt; + struct hifc_api_cmd_cell *curr_node; + + struct hifc_dma_addr_align cells_addr; + + u8 *cell_vaddr_base; + u64 cell_paddr_base; + u8 *rsp_vaddr_base; + u64 rsp_paddr_base; + u8 *buf_vaddr_base; + u64 buf_paddr_base; + u64 cell_size_align; + u64 rsp_size_align; + 
u64 buf_size_align; +}; + +int hifc_api_cmd_write(struct hifc_api_cmd_chain *chain, + enum hifc_node_id dest, void *cmd, u16 size); + +int hifc_api_cmd_read(struct hifc_api_cmd_chain *chain, + enum hifc_node_id dest, void *cmd, u16 size, + void *ack, u16 ack_size); + +int hifc_api_cmd_init(struct hifc_hwdev *hwdev, + struct hifc_api_cmd_chain **chain); + +void hifc_api_cmd_free(struct hifc_api_cmd_chain **chain); + +#endif diff --git a/drivers/scsi/huawei/hifc/hifc_cfg.c b/drivers/scsi/huawei/hifc/hifc_cfg.c new file mode 100644 index 000000000000..5ebe5d754c41 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_cfg.c @@ -0,0 +1,823 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/mutex.h> +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/module.h> +#include <linux/completion.h> +#include <linux/semaphore.h> +#include <linux/vmalloc.h> + +#include "hifc_knl_adp.h" +#include "hifc_hw.h" +#include "hifc_hwdev.h" +#include "hifc_hwif.h" +#include "hifc_cqm_main.h" +#include "hifc_api_cmd.h" +#include "hifc_hw.h" +#include "hifc_mgmt.h" +#include "hifc_cfg.h" + +uint intr_mode; + +int hifc_sync_time(void *hwdev, u64 time) +{ + struct hifc_sync_time_info time_info = {0}; + u16 out_size = sizeof(time_info); + int err; + + time_info.mstime = time; + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_SYNC_TIME, &time_info, + sizeof(time_info), &time_info, &out_size, + 0); + if (err || time_info.status || !out_size) { + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Failed to sync time to mgmt, err: %d, status: 0x%x, out size: 0x%x\n", + err, time_info.status, out_size); + } + + return err; +} + +static void parse_pub_res_cap(struct service_cap *cap, + struct hifc_dev_cap *dev_cap, + enum func_type type) +{ + cap->port_id = dev_cap->port_id; + cap->force_up = dev_cap->force_up; + + pr_info("Get public resource capbility, force_up: 0x%x\n", + cap->force_up); + /* FC need max queue number, but max queue number info is in + * l2nic cap, we also put max queue num info in public cap, so + * FC can get correct max queue number info. 
+ */ + cap->max_sqs = dev_cap->nic_max_sq + 1; + cap->max_rqs = dev_cap->nic_max_rq + 1; + + cap->host_total_function = dev_cap->host_total_func; + cap->host_oq_id_mask_val = dev_cap->host_oq_id_mask_val; + cap->max_connect_num = dev_cap->max_conn_num; + cap->max_stick2cache_num = dev_cap->max_stick2cache_num; + + pr_info("Get public resource capbility, svc_cap_en: 0x%x\n", + dev_cap->svc_cap_en); + pr_info("port_id=0x%x\n", cap->port_id); + pr_info("Host_total_function=0x%x, host_oq_id_mask_val=0x%x\n", + cap->host_total_function, cap->host_oq_id_mask_val); +} + +static void parse_fc_res_cap(struct service_cap *cap, + struct hifc_dev_cap *dev_cap, + enum func_type type) +{ + struct dev_fc_svc_cap *fc_cap = &cap->fc_cap.dev_fc_cap; + + fc_cap->max_parent_qpc_num = dev_cap->fc_max_pctx; + fc_cap->scq_num = dev_cap->fc_max_scq; + fc_cap->srq_num = dev_cap->fc_max_srq; + fc_cap->max_child_qpc_num = dev_cap->fc_max_cctx; + fc_cap->vp_id_start = dev_cap->fc_vp_id_start; + fc_cap->vp_id_end = dev_cap->fc_vp_id_end; + + pr_info("Get fc resource capbility\n"); + pr_info("Max_parent_qpc_num=0x%x, scq_num=0x%x, srq_num=0x%x, max_child_qpc_num=0x%x\n", + fc_cap->max_parent_qpc_num, fc_cap->scq_num, fc_cap->srq_num, + fc_cap->max_child_qpc_num); + pr_info("Vp_id_start=0x%x, vp_id_end=0x%x\n", + fc_cap->vp_id_start, fc_cap->vp_id_end); +} + +static void parse_dev_cap(struct hifc_hwdev *dev, + struct hifc_dev_cap *dev_cap, enum func_type type) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + + /* Public resource */ + parse_pub_res_cap(cap, dev_cap, type); + + /* PPF managed dynamic resource */ + + parse_fc_res_cap(cap, dev_cap, type); +} + +static int get_cap_from_fw(struct hifc_hwdev *dev, enum func_type type) +{ + struct hifc_dev_cap dev_cap = {0}; + u16 out_len = sizeof(dev_cap); + int err; + + dev_cap.version = HIFC_CMD_VER_FUNC_ID; + err = hifc_global_func_id_get(dev, &dev_cap.func_id); + if (err) + return err; + + sdk_info(dev->dev_hdl, "Get cap from fw, func_idx: %d\n", + dev_cap.func_id); + + err = hifc_msg_to_mgmt_sync(dev, HIFC_MOD_CFGM, HIFC_CFG_NIC_CAP, + &dev_cap, sizeof(dev_cap), + &dev_cap, &out_len, 0); + if (err || dev_cap.status || !out_len) { + sdk_err(dev->dev_hdl, + "Failed to get capability from FW, err: %d, status: 0x%x, out size: 0x%x\n", + err, dev_cap.status, out_len); + return -EFAULT; + } + + parse_dev_cap(dev, &dev_cap, type); + return 0; +} + +static void fc_param_fix(struct hifc_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct fc_service_cap *fc_cap = &cap->fc_cap; + + fc_cap->parent_qpc_size = FC_PCTX_SZ; + fc_cap->child_qpc_size = FC_CCTX_SZ; + fc_cap->sqe_size = FC_SQE_SZ; + + fc_cap->scqc_size = FC_SCQC_SZ; + fc_cap->scqe_size = FC_SCQE_SZ; + + fc_cap->srqc_size = FC_SRQC_SZ; + fc_cap->srqe_size = FC_SRQE_SZ; +} + +static void cfg_get_eq_num(struct hifc_hwdev *dev) +{ + struct cfg_eq_info *eq_info = &dev->cfg_mgmt->eq_info; + + eq_info->num_ceq = dev->hwif->attr.num_ceqs; + eq_info->num_ceq_remain = eq_info->num_ceq; +} + +static int cfg_init_eq(struct hifc_hwdev *dev) +{ + struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; + struct cfg_eq *eq; + u8 num_ceq, i = 0; + + cfg_get_eq_num(dev); + num_ceq = cfg_mgmt->eq_info.num_ceq; + + sdk_info(dev->dev_hdl, "Cfg mgmt: ceqs=0x%x, remain=0x%x\n", + cfg_mgmt->eq_info.num_ceq, cfg_mgmt->eq_info.num_ceq_remain); + + if (!num_ceq) { + sdk_err(dev->dev_hdl, "Ceq num cfg in fw is zero\n"); + return -EFAULT; + } + eq = kcalloc(num_ceq, sizeof(*eq), GFP_KERNEL); + if (!eq) + return -ENOMEM; + + for 
(i = 0; i < num_ceq; ++i) { + eq[i].eqn = i; + eq[i].free = CFG_FREE; + eq[i].type = SERVICE_T_MAX; + } + + cfg_mgmt->eq_info.eq = eq; + + mutex_init(&cfg_mgmt->eq_info.eq_mutex); + + return 0; +} + +static int cfg_init_interrupt(struct hifc_hwdev *dev) +{ + struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; + struct cfg_irq_info *irq_info = &cfg_mgmt->irq_param_info; + u16 intr_num = dev->hwif->attr.num_irqs; + + if (!intr_num) { + sdk_err(dev->dev_hdl, "Irq num cfg in fw is zero\n"); + return -EFAULT; + } + irq_info->alloc_info = kcalloc(intr_num, sizeof(*irq_info->alloc_info), + GFP_KERNEL); + if (!irq_info->alloc_info) + return -ENOMEM; + + irq_info->num_irq_hw = intr_num; + + cfg_mgmt->svc_cap.interrupt_type = intr_mode; + + mutex_init(&irq_info->irq_mutex); + + return 0; +} + +static int cfg_enable_interrupt(struct hifc_hwdev *dev) +{ + struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; + u16 nreq = cfg_mgmt->irq_param_info.num_irq_hw; + + void *pcidev = dev->pcidev_hdl; + struct irq_alloc_info_st *irq_info; + struct msix_entry *entry; + u16 i = 0; + int actual_irq; + + irq_info = cfg_mgmt->irq_param_info.alloc_info; + + sdk_info(dev->dev_hdl, "Interrupt type: %d, irq num: %d.\n", + cfg_mgmt->svc_cap.interrupt_type, nreq); + + switch (cfg_mgmt->svc_cap.interrupt_type) { + case INTR_TYPE_MSIX: + + if (!nreq) { + sdk_err(dev->dev_hdl, "Interrupt number cannot be zero\n"); + return -EINVAL; + } + entry = kcalloc(nreq, sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + for (i = 0; i < nreq; i++) + entry[i].entry = i; + + actual_irq = pci_enable_msix_range(pcidev, entry, + VECTOR_THRESHOLD, nreq); + if (actual_irq < 0) { + sdk_err(dev->dev_hdl, "Alloc msix entries with threshold 2 failed.\n"); + kfree(entry); + return -ENOMEM; + } + + nreq = (u16)actual_irq; + cfg_mgmt->irq_param_info.num_total = nreq; + cfg_mgmt->irq_param_info.num_irq_remain = nreq; + sdk_info(dev->dev_hdl, "Request %d msix vector success.\n", + nreq); + + for (i = 0; i < nreq; ++i) { + /* u16 driver uses to specify entry, OS writes */ + irq_info[i].info.msix_entry_idx = entry[i].entry; + /* u32 kernel uses to write allocated vector */ + irq_info[i].info.irq_id = entry[i].vector; + irq_info[i].type = SERVICE_T_MAX; + irq_info[i].free = CFG_FREE; + } + + kfree(entry); + + break; + + default: + sdk_err(dev->dev_hdl, "Unsupport interrupt type %d\n", + cfg_mgmt->svc_cap.interrupt_type); + break; + } + + return 0; +} + +int hifc_alloc_irqs(void *hwdev, enum hifc_service_type type, u16 num, + struct irq_info *irq_info_array, u16 *act_num) +{ + struct hifc_hwdev *dev = hwdev; + struct cfg_mgmt_info *cfg_mgmt; + struct cfg_irq_info *irq_info; + struct irq_alloc_info_st *alloc_info; + int max_num_irq; + u16 free_num_irq; + int i, j; + + if (!hwdev || !irq_info_array || !act_num) + return -EINVAL; + + cfg_mgmt = dev->cfg_mgmt; + irq_info = &cfg_mgmt->irq_param_info; + alloc_info = irq_info->alloc_info; + max_num_irq = irq_info->num_total; + free_num_irq = irq_info->num_irq_remain; + + mutex_lock(&irq_info->irq_mutex); + + if (num > free_num_irq) { + if (free_num_irq == 0) { + sdk_err(dev->dev_hdl, + "no free irq resource in cfg mgmt.\n"); + mutex_unlock(&irq_info->irq_mutex); + return -ENOMEM; + } + + sdk_warn(dev->dev_hdl, "only %d irq resource in cfg mgmt.\n", + free_num_irq); + num = free_num_irq; + } + + *act_num = 0; + + for (i = 0; i < num; i++) { + for (j = 0; j < max_num_irq; j++) { + if (alloc_info[j].free == CFG_FREE) { + if (irq_info->num_irq_remain == 0) { + sdk_err(dev->dev_hdl, "No free irq resource in cfg 
mgmt\n"); + mutex_unlock(&irq_info->irq_mutex); + return -EINVAL; + } + alloc_info[j].type = type; + alloc_info[j].free = CFG_BUSY; + + irq_info_array[i].msix_entry_idx = + alloc_info[j].info.msix_entry_idx; + irq_info_array[i].irq_id = + alloc_info[j].info.irq_id; + (*act_num)++; + irq_info->num_irq_remain--; + + break; + } + } + } + + mutex_unlock(&irq_info->irq_mutex); + return 0; +} + +void hifc_free_irq(void *hwdev, enum hifc_service_type type, u32 irq_id) +{ + struct hifc_hwdev *dev = hwdev; + struct cfg_mgmt_info *cfg_mgmt; + struct cfg_irq_info *irq_info; + struct irq_alloc_info_st *alloc_info; + int max_num_irq; + int i; + + if (!hwdev) + return; + + cfg_mgmt = dev->cfg_mgmt; + irq_info = &cfg_mgmt->irq_param_info; + alloc_info = irq_info->alloc_info; + max_num_irq = irq_info->num_total; + + mutex_lock(&irq_info->irq_mutex); + + for (i = 0; i < max_num_irq; i++) { + if (irq_id == alloc_info[i].info.irq_id && + type == alloc_info[i].type) { + if (alloc_info[i].free == CFG_BUSY) { + alloc_info[i].free = CFG_FREE; + irq_info->num_irq_remain++; + if (irq_info->num_irq_remain > max_num_irq) { + sdk_err(dev->dev_hdl, "Find target,but over range\n"); + mutex_unlock(&irq_info->irq_mutex); + return; + } + break; + } + } + } + + if (i >= max_num_irq) + sdk_warn(dev->dev_hdl, "Irq %d don`t need to free\n", irq_id); + + mutex_unlock(&irq_info->irq_mutex); +} + +int init_cfg_mgmt(struct hifc_hwdev *dev) +{ + int err; + struct cfg_mgmt_info *cfg_mgmt; + + cfg_mgmt = kzalloc(sizeof(*cfg_mgmt), GFP_KERNEL); + if (!cfg_mgmt) + return -ENOMEM; + + dev->cfg_mgmt = cfg_mgmt; + cfg_mgmt->hwdev = dev; + + err = cfg_init_eq(dev); + if (err) { + sdk_err(dev->dev_hdl, "Failed to init cfg event queue, err: %d\n", + err); + goto free_mgmt_mem; + } + + err = cfg_init_interrupt(dev); + if (err) { + sdk_err(dev->dev_hdl, "Failed to init cfg interrupt, err: %d\n", + err); + goto free_eq_mem; + } + + err = cfg_enable_interrupt(dev); + if (err) { + sdk_err(dev->dev_hdl, "Failed to enable cfg interrupt, err: %d\n", + err); + goto free_interrupt_mem; + } + + return 0; + +free_interrupt_mem: + kfree(cfg_mgmt->irq_param_info.alloc_info); + + cfg_mgmt->irq_param_info.alloc_info = NULL; + +free_eq_mem: + kfree(cfg_mgmt->eq_info.eq); + + cfg_mgmt->eq_info.eq = NULL; + +free_mgmt_mem: + kfree(cfg_mgmt); + return err; +} + +void free_cfg_mgmt(struct hifc_hwdev *dev) +{ + struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; + + /* if the allocated resource were recycled */ + if (cfg_mgmt->irq_param_info.num_irq_remain != + cfg_mgmt->irq_param_info.num_total || + cfg_mgmt->eq_info.num_ceq_remain != cfg_mgmt->eq_info.num_ceq) + sdk_err(dev->dev_hdl, "Can't reclaim all irq and event queue, please check\n"); + + switch (cfg_mgmt->svc_cap.interrupt_type) { + case INTR_TYPE_MSIX: + pci_disable_msix(dev->pcidev_hdl); + break; + + case INTR_TYPE_MSI: + pci_disable_msi(dev->pcidev_hdl); + break; + + case INTR_TYPE_INT: + default: + break; + } + + kfree(cfg_mgmt->irq_param_info.alloc_info); + cfg_mgmt->irq_param_info.alloc_info = NULL; + + kfree(cfg_mgmt->eq_info.eq); + cfg_mgmt->eq_info.eq = NULL; + + kfree(cfg_mgmt); +} + +int init_capability(struct hifc_hwdev *dev) +{ + int err; + enum func_type type = HIFC_FUNC_TYPE(dev); + struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; + + cfg_mgmt->svc_cap.timer_en = 1; + cfg_mgmt->svc_cap.test_xid_alloc_mode = 1; + cfg_mgmt->svc_cap.test_gpa_check_enable = 1; + + err = get_cap_from_fw(dev, type); + if (err) { + sdk_err(dev->dev_hdl, "Failed to get PF/PPF capability\n"); + return err; + } + + 
fc_param_fix(dev); + + if (dev->cfg_mgmt->svc_cap.force_up) + dev->feature_cap |= HIFC_FUNC_FORCE_LINK_UP; + + sdk_info(dev->dev_hdl, "Init capability success\n"); + return 0; +} + +void free_capability(struct hifc_hwdev *dev) +{ + sdk_info(dev->dev_hdl, "Free capability success"); +} + +bool hifc_support_fc(void *hwdev, struct fc_service_cap *cap) +{ + struct hifc_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.fc_cap, sizeof(*cap)); + + return true; +} + +u8 hifc_host_oq_id_mask(void *hwdev) +{ + struct hifc_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting host oq id mask\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.host_oq_id_mask_val; +} + +u16 hifc_func_max_qnum(void *hwdev) +{ + struct hifc_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting function max queue number\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.max_sqs; +} + +/* Caller should ensure atomicity when calling this function */ +int hifc_stateful_init(void *hwdev) +{ + struct hifc_hwdev *dev = hwdev; + int err; + + if (!dev) + return -EINVAL; + + if (dev->statufull_ref_cnt++) + return 0; + + err = cqm_init(dev); + if (err) { + sdk_err(dev->dev_hdl, "Failed to init cqm, err: %d\n", err); + goto init_cqm_err; + } + + sdk_info(dev->dev_hdl, "Initialize statefull resource success\n"); + + return 0; + +init_cqm_err: + + dev->statufull_ref_cnt--; + + return err; +} + +/* Caller should ensure atomicity when calling this function */ +void hifc_stateful_deinit(void *hwdev) +{ + struct hifc_hwdev *dev = hwdev; + + if (!dev || !dev->statufull_ref_cnt) + return; + + if (--dev->statufull_ref_cnt) + return; + + cqm_uninit(hwdev); + + sdk_info(dev->dev_hdl, "Clear statefull resource success\n"); +} + +bool hifc_is_hwdev_mod_inited(void *hwdev, enum hifc_hwdev_init_state state) +{ + struct hifc_hwdev *dev = hwdev; + + if (!hwdev || state >= HIFC_HWDEV_MAX_INVAL_INITED) + return false; + + return !!test_bit(state, &dev->func_state); +} + +static int hifc_os_dep_init(struct hifc_hwdev *hwdev) +{ + hwdev->workq = create_singlethread_workqueue(HIFC_HW_WQ_NAME); + if (!hwdev->workq) { + sdk_err(hwdev->dev_hdl, "Failed to initialize hardware workqueue\n"); + return -EFAULT; + } + + sema_init(&hwdev->fault_list_sem, 1); + + return 0; +} + +static void hifc_os_dep_deinit(struct hifc_hwdev *hwdev) +{ + destroy_workqueue(hwdev->workq); +} + +static int __hilink_phy_init(struct hifc_hwdev *hwdev) +{ + int err; + + err = hifc_phy_init_status_judge(hwdev); + if (err) { + sdk_info(hwdev->dev_hdl, "Phy init failed\n"); + return err; + } + + return 0; +} + +static int init_hwdev_and_hwif(struct hifc_init_para *para) +{ + struct hifc_hwdev *hwdev; + int err; + + if (!(*para->hwdev)) { + hwdev = kzalloc(sizeof(*hwdev), GFP_KERNEL); + if (!hwdev) + return -ENOMEM; + + *para->hwdev = hwdev; + hwdev->adapter_hdl = para->adapter_hdl; + hwdev->pcidev_hdl = para->pcidev_hdl; + hwdev->dev_hdl = para->dev_hdl; + hwdev->chip_node = para->chip_node; + + hwdev->chip_fault_stats = vzalloc(HIFC_CHIP_FAULT_SIZE); + if (!hwdev->chip_fault_stats) + goto alloc_chip_fault_stats_err; + + err = hifc_init_hwif(hwdev, para->cfg_reg_base, + para->intr_reg_base, + para->db_base_phy, para->db_base, + para->dwqe_mapping); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init hwif\n"); + goto init_hwif_err; + } + } + + return 0; + +init_hwif_err: + vfree(hwdev->chip_fault_stats); + +alloc_chip_fault_stats_err: + + *para->hwdev = NULL; + + return -EFAULT; 
+} + +static void deinit_hwdev_and_hwif(struct hifc_hwdev *hwdev) +{ + hifc_free_hwif(hwdev); + + vfree(hwdev->chip_fault_stats); + + kfree(hwdev); +} + +static int init_hw_cfg(struct hifc_hwdev *hwdev) +{ + int err; + + err = init_capability(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init capability\n"); + return err; + } + + err = __hilink_phy_init(hwdev); + if (err) + goto hilink_phy_init_err; + + return 0; + +hilink_phy_init_err: + free_capability(hwdev); + + return err; +} + +/* Return: + * 0: all success + * >0: partitial success + * <0: all failed + */ +int hifc_init_hwdev(struct hifc_init_para *para) +{ + struct hifc_hwdev *hwdev; + int err; + + err = init_hwdev_and_hwif(para); + if (err) + return err; + + hwdev = *para->hwdev; + + /* detect slave host according to BAR reg */ + hwdev->feature_cap = HIFC_FUNC_MGMT | HIFC_FUNC_PORT | + HIFC_FUNC_SUPP_RATE_LIMIT | HIFC_FUNC_SUPP_DFX_REG | + HIFC_FUNC_SUPP_RX_MODE | HIFC_FUNC_SUPP_SET_VF_MAC_VLAN | + HIFC_FUNC_SUPP_CHANGE_MAC; + + err = hifc_os_dep_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init os dependent\n"); + goto os_dep_init_err; + } + + hifc_set_chip_present(hwdev); + hifc_init_heartbeat(hwdev); + + err = init_cfg_mgmt(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init config mgmt\n"); + goto init_cfg_mgmt_err; + } + + err = hifc_init_comm_ch(hwdev); + if (err) { + if (!(hwdev->func_state & HIFC_HWDEV_INIT_MODES_MASK)) { + sdk_err(hwdev->dev_hdl, "Failed to init communication channel\n"); + goto init_comm_ch_err; + } else { + sdk_err(hwdev->dev_hdl, "Init communication channel partitail failed\n"); + return hwdev->func_state & HIFC_HWDEV_INIT_MODES_MASK; + } + } + + err = init_hw_cfg(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init hardware config\n"); + goto init_hw_cfg_err; + } + + set_bit(HIFC_HWDEV_ALL_INITED, &hwdev->func_state); + + sdk_info(hwdev->dev_hdl, "Init hwdev success\n"); + + return 0; + +init_hw_cfg_err: + return (hwdev->func_state & HIFC_HWDEV_INIT_MODES_MASK); + +init_comm_ch_err: + free_cfg_mgmt(hwdev); + +init_cfg_mgmt_err: + hifc_destroy_heartbeat(hwdev); + hifc_os_dep_deinit(hwdev); + +os_dep_init_err: + deinit_hwdev_and_hwif(hwdev); + *para->hwdev = NULL; + + return -EFAULT; +} + +void hifc_free_hwdev(void *hwdev) +{ + struct hifc_hwdev *dev = hwdev; + enum hifc_hwdev_init_state state = HIFC_HWDEV_ALL_INITED; + int flag = 0; + + if (!hwdev) + return; + + if (test_bit(HIFC_HWDEV_ALL_INITED, &dev->func_state)) { + clear_bit(HIFC_HWDEV_ALL_INITED, &dev->func_state); + + /* BM slave function not need to exec rx_tx_flush */ + + hifc_func_rx_tx_flush(hwdev); + + free_capability(dev); + } + while (state > HIFC_HWDEV_NONE_INITED) { + if (test_bit(state, &dev->func_state)) { + flag = 1; + break; + } + state--; + } + if (flag) { + hifc_uninit_comm_ch(dev); + free_cfg_mgmt(dev); + hifc_destroy_heartbeat(dev); + hifc_os_dep_deinit(dev); + } + clear_bit(HIFC_HWDEV_NONE_INITED, &dev->func_state); + + deinit_hwdev_and_hwif(dev); +} + +u64 hifc_get_func_feature_cap(void *hwdev) +{ + struct hifc_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting function feature capability\n"); + return 0; + } + + return dev->feature_cap; +} + diff --git a/drivers/scsi/huawei/hifc/hifc_cfg.h b/drivers/scsi/huawei/hifc/hifc_cfg.h new file mode 100644 index 000000000000..b8a9dd35b1fd --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_cfg.h @@ -0,0 +1,171 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * 
Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef __CFG_MGT_H__ +#define __CFG_MGT_H__ + +enum { + CFG_FREE = 0, + CFG_BUSY = 1 +}; + +/* FC */ +#define FC_PCTX_SZ 256 +#define FC_CCTX_SZ 256 +#define FC_SQE_SZ 128 +#define FC_SCQC_SZ 64 +#define FC_SCQE_SZ 64 +#define FC_SRQC_SZ 64 +#define FC_SRQE_SZ 32 + +/* device capability */ +struct service_cap { + /* Host global resources */ + u16 host_total_function; + u8 host_oq_id_mask_val; + + /* DO NOT get interrupt_type from firmware */ + enum intr_type interrupt_type; + u8 intr_chip_en; + + u8 port_id; /* PF/VF's physical port */ + u8 force_up; + + u8 timer_en; /* 0:disable, 1:enable */ + + u16 max_sqs; + u16 max_rqs; + + /* For test */ + bool test_xid_alloc_mode; + bool test_gpa_check_enable; + + u32 max_connect_num; /* PF/VF maximum connection number(1M) */ + /* The maximum connections which can be stick to cache memory, max 1K */ + u16 max_stick2cache_num; + + struct nic_service_cap nic_cap; /* NIC capability */ + struct fc_service_cap fc_cap; /* FC capability */ +}; + +struct hifc_sync_time_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u64 mstime; +}; + +struct cfg_eq { + enum hifc_service_type type; + int eqn; + int free; /* 1 - alocated, 0- freed */ +}; + +struct cfg_eq_info { + struct cfg_eq *eq; + u8 num_ceq; + u8 num_ceq_remain; + /* mutex used for allocate EQs */ + struct mutex eq_mutex; +}; + +struct irq_alloc_info_st { + enum hifc_service_type type; + int free; /* 1 - alocated, 0- freed */ + struct irq_info info; +}; + +struct cfg_irq_info { + struct irq_alloc_info_st *alloc_info; + u16 num_total; + u16 num_irq_remain; + u16 num_irq_hw; /* device max irq number */ + + /* mutex used for allocate EQs */ + struct mutex irq_mutex; +}; + +#define VECTOR_THRESHOLD 2 + +struct cfg_mgmt_info { + struct hifc_hwdev *hwdev; + struct service_cap svc_cap; + struct cfg_eq_info eq_info; /* EQ */ + struct cfg_irq_info irq_param_info; /* IRQ */ + u32 func_seq_num; /* temporary */ +}; + +enum cfg_sub_cmd { + /* PPF(PF) <-> FW */ + HIFC_CFG_NIC_CAP = 0, + CFG_FW_VERSION, + CFG_UCODE_VERSION, + HIFC_CFG_FUNC_CAP, + HIFC_CFG_MBOX_CAP = 6, +}; + +struct hifc_dev_cap { + u8 status; + u8 version; + u8 rsvd0[6]; + + /* Public resource */ + u8 sf_svc_attr; + u8 host_id; + u8 sf_en_pf; + u8 sf_en_vf; + + u8 ep_id; + u8 intr_type; + u8 max_cos_id; + u8 er_id; + u8 port_id; + u8 max_vf; + u16 svc_cap_en; + u16 host_total_func; + u8 host_oq_id_mask_val; + u8 max_vf_cos_id; + + u32 max_conn_num; + u16 max_stick2cache_num; + u16 max_bfilter_start_addr; + u16 bfilter_len; + u16 hash_bucket_num; + u8 cfg_file_ver; + u8 net_port_mode; + u8 valid_cos_bitmap; /* every bit indicate cos is valid */ + u8 force_up; + u32 pf_num; + u32 pf_id_start; + u32 vf_num; + u32 vf_id_start; + + /* shared resource */ + u32 host_pctx_num; + u8 host_sf_en; + u8 rsvd2[3]; + u32 host_ccxt_num; + u32 host_scq_num; + u32 host_srq_num; + u32 host_mpt_num; + /* l2nic */ + u16 nic_max_sq; + u16 nic_max_rq; + u32 rsvd[46]; + /* FC */ + u32 fc_max_pctx; + u32 fc_max_scq; + u32 fc_max_srq; + + u32 fc_max_cctx; + u32 fc_cctx_id_start; + + u8 fc_vp_id_start; + u8 fc_vp_id_end; + u16 func_id; +}; +#endif diff --git a/drivers/scsi/huawei/hifc/hifc_cmdq.c b/drivers/scsi/huawei/hifc/hifc_cmdq.c new file mode 100644 index 000000000000..03531017c412 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_cmdq.c @@ -0,0 +1,1507 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#define 
pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/errno.h> +#include <linux/completion.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/spinlock.h> +#include <linux/slab.h> +#include <linux/module.h> + +#include "hifc_knl_adp.h" +#include "hifc_hw.h" +#include "hifc_hwdev.h" +#include "hifc_hwif.h" +#include "hifc_wq.h" +#include "hifc_api_cmd.h" +#include "hifc_mgmt.h" +#include "hifc_eqs.h" +#include "hifc_cmdq.h" + +#define CMDQ_CMD_TIMEOUT 1000 /* millisecond */ +#define UPPER_8_BITS(data) (((data) >> 8) & 0xFF) +#define LOWER_8_BITS(data) ((data) & 0xFF) + +#define CMDQ_DB_INFO_HI_PROD_IDX_SHIFT 0 +#define CMDQ_DB_INFO_QUEUE_TYPE_SHIFT 23 +#define CMDQ_DB_INFO_CMDQ_TYPE_SHIFT 24 +#define CMDQ_DB_INFO_SRC_TYPE_SHIFT 27 +#define CMDQ_DB_INFO_HI_PROD_IDX_MASK 0xFFU +#define CMDQ_DB_INFO_QUEUE_TYPE_MASK 0x1U +#define CMDQ_DB_INFO_CMDQ_TYPE_MASK 0x7U +#define CMDQ_DB_INFO_SRC_TYPE_MASK 0x1FU + +#define CMDQ_DB_INFO_SET(val, member) \ + (((val) & CMDQ_DB_INFO_##member##_MASK) << \ + CMDQ_DB_INFO_##member##_SHIFT) + +#define CMDQ_CTRL_PI_SHIFT 0 +#define CMDQ_CTRL_CMD_SHIFT 16 +#define CMDQ_CTRL_MOD_SHIFT 24 +#define CMDQ_CTRL_ACK_TYPE_SHIFT 29 +#define CMDQ_CTRL_HW_BUSY_BIT_SHIFT 31 +#define CMDQ_CTRL_PI_MASK 0xFFFFU +#define CMDQ_CTRL_CMD_MASK 0xFFU +#define CMDQ_CTRL_MOD_MASK 0x1FU +#define CMDQ_CTRL_ACK_TYPE_MASK 0x3U +#define CMDQ_CTRL_HW_BUSY_BIT_MASK 0x1U + +#define CMDQ_CTRL_SET(val, member) \ + (((val) & CMDQ_CTRL_##member##_MASK) \ + << CMDQ_CTRL_##member##_SHIFT) + +#define CMDQ_CTRL_GET(val, member) \ + (((val) >> CMDQ_CTRL_##member##_SHIFT) \ + & CMDQ_CTRL_##member##_MASK) + +#define CMDQ_WQE_HEADER_BUFDESC_LEN_SHIFT 0 +#define CMDQ_WQE_HEADER_COMPLETE_FMT_SHIFT 15 +#define CMDQ_WQE_HEADER_DATA_FMT_SHIFT 22 +#define CMDQ_WQE_HEADER_COMPLETE_REQ_SHIFT 23 +#define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_SHIFT 27 +#define CMDQ_WQE_HEADER_CTRL_LEN_SHIFT 29 +#define CMDQ_WQE_HEADER_HW_BUSY_BIT_SHIFT 31 + +#define CMDQ_WQE_HEADER_BUFDESC_LEN_MASK 0xFFU +#define CMDQ_WQE_HEADER_COMPLETE_FMT_MASK 0x1U +#define CMDQ_WQE_HEADER_DATA_FMT_MASK 0x1U +#define CMDQ_WQE_HEADER_COMPLETE_REQ_MASK 0x1U +#define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_MASK 0x3U +#define CMDQ_WQE_HEADER_CTRL_LEN_MASK 0x3U +#define CMDQ_WQE_HEADER_HW_BUSY_BIT_MASK 0x1U + +#define CMDQ_WQE_HEADER_SET(val, member) \ + (((val) & CMDQ_WQE_HEADER_##member##_MASK) \ + << CMDQ_WQE_HEADER_##member##_SHIFT) + +#define CMDQ_WQE_HEADER_GET(val, member) \ + (((val) >> CMDQ_WQE_HEADER_##member##_SHIFT) \ + & CMDQ_WQE_HEADER_##member##_MASK) + +#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_SHIFT 0 +#define CMDQ_CTXT_EQ_ID_SHIFT 56 +#define CMDQ_CTXT_CEQ_ARM_SHIFT 61 +#define CMDQ_CTXT_CEQ_EN_SHIFT 62 +#define CMDQ_CTXT_HW_BUSY_BIT_SHIFT 63 +#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK 0xFFFFFFFFFFFFF +#define CMDQ_CTXT_EQ_ID_MASK 0x1F +#define CMDQ_CTXT_CEQ_ARM_MASK 0x1 +#define CMDQ_CTXT_CEQ_EN_MASK 0x1 +#define CMDQ_CTXT_HW_BUSY_BIT_MASK 0x1 + +#define CMDQ_CTXT_PAGE_INFO_SET(val, member) \ + (((u64)(val) & CMDQ_CTXT_##member##_MASK) \ + << CMDQ_CTXT_##member##_SHIFT) + +#define CMDQ_CTXT_PAGE_INFO_GET(val, member) \ + (((u64)(val) >> CMDQ_CTXT_##member##_SHIFT) \ + & CMDQ_CTXT_##member##_MASK) + +#define CMDQ_CTXT_WQ_BLOCK_PFN_SHIFT 0 +#define CMDQ_CTXT_CI_SHIFT 52 +#define CMDQ_CTXT_WQ_BLOCK_PFN_MASK 0xFFFFFFFFFFFFF +#define CMDQ_CTXT_CI_MASK 0xFFF + +#define CMDQ_CTXT_BLOCK_INFO_SET(val, member) \ + (((u64)(val) & 
CMDQ_CTXT_##member##_MASK) \ + << CMDQ_CTXT_##member##_SHIFT) + +#define CMDQ_CTXT_BLOCK_INFO_GET(val, member) \ + (((u64)(val) >> CMDQ_CTXT_##member##_SHIFT) \ + & CMDQ_CTXT_##member##_MASK) + +#define SAVED_DATA_ARM_SHIFT 31 +#define SAVED_DATA_ARM_MASK 0x1U + +#define SAVED_DATA_SET(val, member) \ + (((val) & SAVED_DATA_##member##_MASK) \ + << SAVED_DATA_##member##_SHIFT) + +#define SAVED_DATA_CLEAR(val, member) \ + ((val) & (~(SAVED_DATA_##member##_MASK \ + << SAVED_DATA_##member##_SHIFT))) + +#define WQE_ERRCODE_VAL_SHIFT 20 +#define WQE_ERRCODE_VAL_MASK 0xF + +#define WQE_ERRCODE_GET(val, member) \ + (((val) >> WQE_ERRCODE_##member##_SHIFT) & \ + WQE_ERRCODE_##member##_MASK) + +#define CEQE_CMDQ_TYPE_SHIFT 0 +#define CEQE_CMDQ_TYPE_MASK 0x7 + +#define CEQE_CMDQ_GET(val, member) \ + (((val) >> CEQE_CMDQ_##member##_SHIFT) & CEQE_CMDQ_##member##_MASK) + +#define WQE_COMPLETED(ctrl_info) CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT) + +#define WQE_HEADER(wqe) ((struct hifc_cmdq_header *)(wqe)) + +#define CMDQ_DB_PI_OFF(pi) (((u16)LOWER_8_BITS(pi)) << 3) + +#define CMDQ_DB_ADDR(db_base, pi) \ + (((u8 *)(db_base) + HIFC_DB_OFF) + CMDQ_DB_PI_OFF(pi)) + +#define CMDQ_PFN_SHIFT 12 +#define CMDQ_PFN(addr) ((addr) >> CMDQ_PFN_SHIFT) + +#define FIRST_DATA_TO_WRITE_LAST sizeof(u64) +#define WQE_LCMD_SIZE 64 +#define WQE_SCMD_SIZE 64 +#define COMPLETE_LEN 3 +#define CMDQ_WQEBB_SIZE 64 +#define CMDQ_WQE_SIZE 64 +#define CMDQ_WQ_PAGE_SIZE 4096 + +#define WQE_NUM_WQEBBS(wqe_size, wq) \ + ((u16)(ALIGN((u32)(wqe_size), (wq)->wqebb_size) / (wq)->wqebb_size)) + +#define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \ + struct hifc_cmdqs, cmdq[0]) + +#define CMDQ_SEND_CMPT_CODE 10 +#define CMDQ_COMPLETE_CMPT_CODE 11 + +#define HIFC_GET_CMDQ_FREE_WQEBBS(cmdq_wq) \ + atomic_read(&(cmdq_wq)->delta) + +enum cmdq_scmd_type { + CMDQ_SET_ARM_CMD = 2, +}; + +enum cmdq_wqe_type { + WQE_LCMD_TYPE, + WQE_SCMD_TYPE, +}; + +enum ctrl_sect_len { + CTRL_SECT_LEN = 1, + CTRL_DIRECT_SECT_LEN = 2, +}; + +enum bufdesc_len { + BUFDESC_LCMD_LEN = 2, + BUFDESC_SCMD_LEN = 3, +}; + +enum data_format { + DATA_SGE, + DATA_DIRECT, +}; + +enum completion_format { + COMPLETE_DIRECT, + COMPLETE_SGE, +}; + +enum completion_request { + CEQ_SET = 1, +}; + +enum cmdq_cmd_type { + SYNC_CMD_DIRECT_RESP, + SYNC_CMD_SGE_RESP, + ASYNC_CMD, +}; + +bool hifc_cmdq_idle(struct hifc_cmdq *cmdq) +{ + struct hifc_wq *wq = cmdq->wq; + + return (atomic_read(&wq->delta) == wq->q_depth ? 
true : false); +} + +struct hifc_cmd_buf *hifc_alloc_cmd_buf(void *hwdev) +{ + struct hifc_cmdqs *cmdqs; + struct hifc_cmd_buf *cmd_buf; + void *dev; + + if (!hwdev) { + pr_err("Failed to alloc cmd buf, Invalid hwdev\n"); + return NULL; + } + + cmdqs = ((struct hifc_hwdev *)hwdev)->cmdqs; + dev = ((struct hifc_hwdev *)hwdev)->dev_hdl; + + cmd_buf = kzalloc(sizeof(*cmd_buf), GFP_ATOMIC); + if (!cmd_buf) + return NULL; + + cmd_buf->buf = pci_pool_alloc(cmdqs->cmd_buf_pool, GFP_ATOMIC, + &cmd_buf->dma_addr); + if (!cmd_buf->buf) { + sdk_err(dev, "Failed to allocate cmdq cmd buf from the pool\n"); + goto alloc_pci_buf_err; + } + + return cmd_buf; + +alloc_pci_buf_err: + kfree(cmd_buf); + return NULL; +} + +void hifc_free_cmd_buf(void *hwdev, struct hifc_cmd_buf *cmd_buf) +{ + struct hifc_cmdqs *cmdqs; + + if (!hwdev || !cmd_buf) { + pr_err("Failed to free cmd buf\n"); + return; + } + + cmdqs = ((struct hifc_hwdev *)hwdev)->cmdqs; + + pci_pool_free(cmdqs->cmd_buf_pool, cmd_buf->buf, cmd_buf->dma_addr); + kfree(cmd_buf); +} + +static int cmdq_wqe_size(enum cmdq_wqe_type wqe_type) +{ + int wqe_size = 0; + + switch (wqe_type) { + case WQE_LCMD_TYPE: + wqe_size = WQE_LCMD_SIZE; + break; + case WQE_SCMD_TYPE: + wqe_size = WQE_SCMD_SIZE; + break; + } + + return wqe_size; +} + +static int cmdq_get_wqe_size(enum bufdesc_len len) +{ + int wqe_size = 0; + + switch (len) { + case BUFDESC_LCMD_LEN: + wqe_size = WQE_LCMD_SIZE; + break; + case BUFDESC_SCMD_LEN: + wqe_size = WQE_SCMD_SIZE; + break; + } + + return wqe_size; +} + +static void cmdq_set_completion(struct hifc_cmdq_completion *complete, + struct hifc_cmd_buf *buf_out) +{ + struct hifc_sge_resp *sge_resp = &complete->sge_resp; + + hifc_set_sge(&sge_resp->sge, buf_out->dma_addr, + HIFC_CMDQ_BUF_SIZE); +} + +static void cmdq_set_lcmd_bufdesc(struct hifc_cmdq_wqe_lcmd *wqe, + struct hifc_cmd_buf *buf_in) +{ + hifc_set_sge(&wqe->buf_desc.sge, buf_in->dma_addr, buf_in->size); +} + +static void cmdq_set_inline_wqe_data(struct hifc_cmdq_inline_wqe *wqe, + const void *buf_in, u32 in_size) +{ + struct hifc_cmdq_wqe_scmd *wqe_scmd = &wqe->wqe_scmd; + + wqe_scmd->buf_desc.buf_len = in_size; + memcpy(wqe_scmd->buf_desc.data, buf_in, in_size); +} + +static void cmdq_fill_db(struct hifc_cmdq_db *db, + enum hifc_cmdq_type cmdq_type, u16 prod_idx) +{ + db->db_info = CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX) | + CMDQ_DB_INFO_SET(HIFC_DB_CMDQ_TYPE, QUEUE_TYPE) | + CMDQ_DB_INFO_SET(cmdq_type, CMDQ_TYPE) | + CMDQ_DB_INFO_SET(HIFC_DB_SRC_CMDQ_TYPE, SRC_TYPE); +} + +static void cmdq_set_db(struct hifc_cmdq *cmdq, + enum hifc_cmdq_type cmdq_type, u16 prod_idx) +{ + struct hifc_cmdq_db db; + + cmdq_fill_db(&db, cmdq_type, prod_idx); + + /* The data that is written to HW should be in Big Endian Format */ + db.db_info = cpu_to_be32(db.db_info); + + wmb(); /* write all before the doorbell */ + writel(db.db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx)); +} + +static void cmdq_wqe_fill(void *dst, const void *src) +{ + memcpy((u8 *)dst + FIRST_DATA_TO_WRITE_LAST, + (u8 *)src + FIRST_DATA_TO_WRITE_LAST, + CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST); + + wmb(); /* The first 8 bytes should be written last */ + + *(u64 *)dst = *(u64 *)src; +} + +static void cmdq_prepare_wqe_ctrl(struct hifc_cmdq_wqe *wqe, int wrapped, + enum hifc_ack_type ack_type, + enum hifc_mod_type mod, u8 cmd, u16 prod_idx, + enum completion_format complete_format, + enum data_format data_format, + enum bufdesc_len buf_len) +{ + struct hifc_ctrl *ctrl; + enum ctrl_sect_len ctrl_len; + struct 
hifc_cmdq_wqe_lcmd *wqe_lcmd; + struct hifc_cmdq_wqe_scmd *wqe_scmd; + u32 saved_data = WQE_HEADER(wqe)->saved_data; + + if (data_format == DATA_SGE) { + wqe_lcmd = &wqe->wqe_lcmd; + + wqe_lcmd->status.status_info = 0; + ctrl = &wqe_lcmd->ctrl; + ctrl_len = CTRL_SECT_LEN; + } else { + wqe_scmd = &wqe->inline_wqe.wqe_scmd; + + wqe_scmd->status.status_info = 0; + ctrl = &wqe_scmd->ctrl; + ctrl_len = CTRL_DIRECT_SECT_LEN; + } + + ctrl->ctrl_info = CMDQ_CTRL_SET(prod_idx, PI) | + CMDQ_CTRL_SET(cmd, CMD) | + CMDQ_CTRL_SET(mod, MOD) | + CMDQ_CTRL_SET(ack_type, ACK_TYPE); + + WQE_HEADER(wqe)->header_info = + CMDQ_WQE_HEADER_SET(buf_len, BUFDESC_LEN) | + CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT) | + CMDQ_WQE_HEADER_SET(data_format, DATA_FMT) | + CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ) | + CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) | + CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN) | + CMDQ_WQE_HEADER_SET((u32)wrapped, HW_BUSY_BIT); + + if (cmd == CMDQ_SET_ARM_CMD && mod == HIFC_MOD_COMM) { + saved_data &= SAVED_DATA_CLEAR(saved_data, ARM); + WQE_HEADER(wqe)->saved_data = saved_data | + SAVED_DATA_SET(1, ARM); + } else { + saved_data &= SAVED_DATA_CLEAR(saved_data, ARM); + WQE_HEADER(wqe)->saved_data = saved_data; + } +} + +static void cmdq_set_lcmd_wqe(struct hifc_cmdq_wqe *wqe, + enum cmdq_cmd_type cmd_type, + struct hifc_cmd_buf *buf_in, + struct hifc_cmd_buf *buf_out, int wrapped, + enum hifc_ack_type ack_type, + enum hifc_mod_type mod, u8 cmd, u16 prod_idx) +{ + struct hifc_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd; + enum completion_format complete_format = COMPLETE_DIRECT; + + switch (cmd_type) { + case SYNC_CMD_SGE_RESP: + if (buf_out) { + complete_format = COMPLETE_SGE; + cmdq_set_completion(&wqe_lcmd->completion, buf_out); + } + break; + case SYNC_CMD_DIRECT_RESP: + complete_format = COMPLETE_DIRECT; + wqe_lcmd->completion.direct_resp = 0; + break; + case ASYNC_CMD: + complete_format = COMPLETE_DIRECT; + wqe_lcmd->completion.direct_resp = 0; + + wqe_lcmd->buf_desc.saved_async_buf = (u64)(buf_in); + break; + } + + cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd, + prod_idx, complete_format, DATA_SGE, + BUFDESC_LCMD_LEN); + + cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in); +} + +static void cmdq_set_inline_wqe(struct hifc_cmdq_wqe *wqe, + enum cmdq_cmd_type cmd_type, + void *buf_in, u16 in_size, + struct hifc_cmd_buf *buf_out, int wrapped, + enum hifc_ack_type ack_type, + enum hifc_mod_type mod, u8 cmd, u16 prod_idx) +{ + struct hifc_cmdq_wqe_scmd *wqe_scmd = &wqe->inline_wqe.wqe_scmd; + enum completion_format complete_format = COMPLETE_DIRECT; + + switch (cmd_type) { + case SYNC_CMD_SGE_RESP: + complete_format = COMPLETE_SGE; + cmdq_set_completion(&wqe_scmd->completion, buf_out); + break; + case SYNC_CMD_DIRECT_RESP: + complete_format = COMPLETE_DIRECT; + wqe_scmd->completion.direct_resp = 0; + break; + default: + break; + } + + cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd, prod_idx, + complete_format, DATA_DIRECT, BUFDESC_SCMD_LEN); + + cmdq_set_inline_wqe_data(&wqe->inline_wqe, buf_in, in_size); +} + +static void cmdq_update_cmd_status(struct hifc_cmdq *cmdq, u16 prod_idx, + struct hifc_cmdq_wqe *wqe) +{ + struct hifc_cmdq_cmd_info *cmd_info; + struct hifc_cmdq_wqe_lcmd *wqe_lcmd; + u32 status_info; + + wqe_lcmd = &wqe->wqe_lcmd; + cmd_info = &cmdq->cmd_infos[prod_idx]; + + if (cmd_info->errcode) { + status_info = be32_to_cpu(wqe_lcmd->status.status_info); + *cmd_info->errcode = WQE_ERRCODE_GET(status_info, VAL); + } + + if (cmd_info->direct_resp && + 
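+ /* only a direct-response command carries its result inline in the wqe */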
cmd_info->cmd_type == HIFC_CMD_TYPE_DIRECT_RESP) + *cmd_info->direct_resp = + cpu_to_be64(wqe_lcmd->completion.direct_resp); +} + +static int hifc_cmdq_sync_timeout_check(struct hifc_cmdq *cmdq, + struct hifc_cmdq_wqe *wqe, u16 pi, + enum hifc_mod_type mod, u8 cmd) +{ + struct hifc_cmdq_wqe_lcmd *wqe_lcmd; + struct hifc_ctrl *ctrl; + u32 ctrl_info; + + wqe_lcmd = &wqe->wqe_lcmd; + ctrl = &wqe_lcmd->ctrl; + ctrl_info = be32_to_cpu((ctrl)->ctrl_info); + if (!WQE_COMPLETED(ctrl_info)) { + sdk_info(cmdq->hwdev->dev_hdl, "Cmdq sync command check busy bit not set, mod: %u, cmd: 0x%x\n", + mod, cmd); + return -EFAULT; + } + + cmdq_update_cmd_status(cmdq, pi, wqe); + + sdk_info(cmdq->hwdev->dev_hdl, "Cmdq sync command check succeed, mod: %u, cmd: 0x%x\n", + mod, cmd); + return 0; +} + +static void __clear_cmd_info(struct hifc_cmdq_cmd_info *cmd_info, + struct hifc_cmdq_cmd_info *saved_cmd_info) +{ + if (cmd_info->errcode == saved_cmd_info->errcode) + cmd_info->errcode = NULL; + + if (cmd_info->done == saved_cmd_info->done) + cmd_info->done = NULL; + + if (cmd_info->direct_resp == saved_cmd_info->direct_resp) + cmd_info->direct_resp = NULL; +} + +static int +cmdq_sync_cmd_timeout_handler(struct hifc_cmdq *cmdq, + struct hifc_cmdq_cmd_info *cmd_info, + struct hifc_cmdq_cmd_info *saved_cmd_info, + struct hifc_cmdq_wqe *curr_wqe, + enum hifc_mod_type mod, u8 cmd, + u16 curr_prod_idx, u64 curr_msg_id) +{ + int err; + + spin_lock_bh(&cmdq->cmdq_lock); + + if (cmd_info->cmpt_code == saved_cmd_info->cmpt_code) + cmd_info->cmpt_code = NULL; + + if (*saved_cmd_info->cmpt_code == CMDQ_COMPLETE_CMPT_CODE) { + sdk_info(cmdq->hwdev->dev_hdl, "Cmdq sync command (mod: %u, cmd: 0x%x)has been completed\n", + mod, cmd); + spin_unlock_bh(&cmdq->cmdq_lock); + return 0; + } + + if (curr_msg_id == cmd_info->cmdq_msg_id) { + err = hifc_cmdq_sync_timeout_check(cmdq, curr_wqe, + curr_prod_idx, + mod, cmd); + if (err) + cmd_info->cmd_type = HIFC_CMD_TYPE_TIMEOUT; + else + cmd_info->cmd_type = HIFC_CMD_TYPE_FAKE_TIMEOUT; + } else { + err = -ETIMEDOUT; + sdk_err(cmdq->hwdev->dev_hdl, + "Cmdq sync command current msg id dismatch with cmd_info msg id, mod: %u, cmd: 0x%x\n", + mod, cmd); + } + + __clear_cmd_info(cmd_info, saved_cmd_info); + + spin_unlock_bh(&cmdq->cmdq_lock); + + return err; +} + +static int cmdq_sync_cmd_direct_resp(struct hifc_cmdq *cmdq, + enum hifc_ack_type ack_type, + enum hifc_mod_type mod, u8 cmd, + struct hifc_cmd_buf *buf_in, + u64 *out_param, u32 timeout) +{ + struct hifc_wq *wq = cmdq->wq; + struct hifc_cmdq_wqe *curr_wqe, wqe; + struct hifc_cmdq_cmd_info *cmd_info, saved_cmd_info; + struct completion done; + u16 curr_prod_idx, next_prod_idx, num_wqebbs; + int wrapped, errcode = 0, wqe_size = cmdq_wqe_size(WQE_LCMD_TYPE); + int cmpt_code = CMDQ_SEND_CMPT_CODE; + ulong timeo; + u64 curr_msg_id; + int err; + + num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq); + + /* Keep wrapped and doorbell index correct. 
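+ * The completion side (cmdq_sync_cmd_handler, run from the ceq tasklet) takes the same cmdq_lock;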
bh - for tasklet(ceq) */ + spin_lock_bh(&cmdq->cmdq_lock); + + /* in order to save a wqebb for setting arm_bit when + * send cmdq commands frequently resulting in cmdq full + */ + if (HIFC_GET_CMDQ_FREE_WQEBBS(wq) < num_wqebbs + 1) { + spin_unlock_bh(&cmdq->cmdq_lock); + return -EBUSY; + } + + /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow */ + curr_wqe = hifc_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx); + if (!curr_wqe) { + spin_unlock_bh(&cmdq->cmdq_lock); + sdk_err(cmdq->hwdev->dev_hdl, "Can not get avalible wqebb, mod: %u, cmd: 0x%x\n", + mod, cmd); + return -EBUSY; + } + + memset(&wqe, 0, sizeof(wqe)); + + wrapped = cmdq->wrapped; + + next_prod_idx = curr_prod_idx + num_wqebbs; + if (next_prod_idx >= wq->q_depth) { + cmdq->wrapped = !cmdq->wrapped; + next_prod_idx -= wq->q_depth; + } + + cmd_info = &cmdq->cmd_infos[curr_prod_idx]; + + init_completion(&done); + + cmd_info->done = &done; + cmd_info->errcode = &errcode; + cmd_info->direct_resp = out_param; + cmd_info->cmpt_code = &cmpt_code; + + memcpy(&saved_cmd_info, cmd_info, sizeof(*cmd_info)); + + cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_DIRECT_RESP, buf_in, NULL, + wrapped, ack_type, mod, cmd, curr_prod_idx); + + /* The data that is written to HW should be in Big Endian Format */ + hifc_cpu_to_be32(&wqe, wqe_size); + + /* CMDQ WQE is not shadow, therefore wqe will be written to wq */ + cmdq_wqe_fill(curr_wqe, &wqe); + + cmd_info->cmd_type = HIFC_CMD_TYPE_DIRECT_RESP; + + (cmd_info->cmdq_msg_id)++; + curr_msg_id = cmd_info->cmdq_msg_id; + + cmdq_set_db(cmdq, HIFC_CMDQ_SYNC, next_prod_idx); + + spin_unlock_bh(&cmdq->cmdq_lock); + + timeo = msecs_to_jiffies(timeout ? timeout : CMDQ_CMD_TIMEOUT); + if (!wait_for_completion_timeout(&done, timeo)) { + err = cmdq_sync_cmd_timeout_handler(cmdq, cmd_info, + &saved_cmd_info, + curr_wqe, mod, cmd, + curr_prod_idx, curr_msg_id); + + if (!err) + goto timeout_check_ok; + + sdk_err(cmdq->hwdev->dev_hdl, "Cmdq sync command timeout, prod idx: 0x%x\n", + curr_prod_idx); + return -ETIMEDOUT; + } + +timeout_check_ok: + smp_rmb(); /* read error code after completion */ + + if (errcode > 1) + return errcode; + + return 0; +} + +static int cmdq_sync_cmd_detail_resp(struct hifc_cmdq *cmdq, + enum hifc_ack_type ack_type, + enum hifc_mod_type mod, u8 cmd, + struct hifc_cmd_buf *buf_in, + struct hifc_cmd_buf *buf_out, + u32 timeout) +{ + struct hifc_wq *wq = cmdq->wq; + struct hifc_cmdq_wqe *curr_wqe, wqe; + struct hifc_cmdq_cmd_info *cmd_info, saved_cmd_info; + struct completion done; + u16 curr_prod_idx, next_prod_idx, num_wqebbs; + int wrapped, errcode = 0, wqe_size = cmdq_wqe_size(WQE_LCMD_TYPE); + int cmpt_code = CMDQ_SEND_CMPT_CODE; + ulong timeo; + u64 curr_msg_id; + int err; + + num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq); + + /* Keep wrapped and doorbell index correct. 
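+ * prod_idx, wrapped and cmd_infos[] are all updated under cmdq_lock, as in the direct-response path above;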
bh - for tasklet(ceq) */ + spin_lock_bh(&cmdq->cmdq_lock); + + /* in order to save a wqebb for setting arm_bit when + * send cmdq commands frequently resulting in cmdq full + */ + if (HIFC_GET_CMDQ_FREE_WQEBBS(wq) < num_wqebbs + 1) { + spin_unlock_bh(&cmdq->cmdq_lock); + return -EBUSY; + } + + /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow*/ + curr_wqe = hifc_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx); + if (!curr_wqe) { + spin_unlock_bh(&cmdq->cmdq_lock); + sdk_err(cmdq->hwdev->dev_hdl, "Can not get avalible wqebb, mod: %u, cmd: 0x%x\n", + mod, cmd); + return -EBUSY; + } + + memset(&wqe, 0, sizeof(wqe)); + + wrapped = cmdq->wrapped; + + next_prod_idx = curr_prod_idx + num_wqebbs; + if (next_prod_idx >= wq->q_depth) { + cmdq->wrapped = !cmdq->wrapped; + next_prod_idx -= wq->q_depth; + } + + cmd_info = &cmdq->cmd_infos[curr_prod_idx]; + + init_completion(&done); + + cmd_info->done = &done; + cmd_info->errcode = &errcode; + cmd_info->cmpt_code = &cmpt_code; + + memcpy(&saved_cmd_info, cmd_info, sizeof(*cmd_info)); + + cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_SGE_RESP, buf_in, buf_out, + wrapped, ack_type, mod, cmd, curr_prod_idx); + + hifc_cpu_to_be32(&wqe, wqe_size); + + cmdq_wqe_fill(curr_wqe, &wqe); + + cmd_info->cmd_type = HIFC_CMD_TYPE_SGE_RESP; + + (cmd_info->cmdq_msg_id)++; + curr_msg_id = cmd_info->cmdq_msg_id; + + cmdq_set_db(cmdq, HIFC_CMDQ_SYNC, next_prod_idx); + + spin_unlock_bh(&cmdq->cmdq_lock); + + timeo = msecs_to_jiffies(timeout ? timeout : CMDQ_CMD_TIMEOUT); + if (!wait_for_completion_timeout(&done, timeo)) { + err = cmdq_sync_cmd_timeout_handler(cmdq, cmd_info, + &saved_cmd_info, + curr_wqe, mod, cmd, + curr_prod_idx, curr_msg_id); + if (!err) + goto timeout_check_ok; + + sdk_err(cmdq->hwdev->dev_hdl, "Cmdq sync command timeout, prod idx: 0x%x\n", + curr_prod_idx); + return -ETIMEDOUT; + } + +timeout_check_ok: + + smp_rmb(); /* read error code after completion */ + + if (errcode > 1) + return errcode; + + return 0; +} + +static int cmdq_async_cmd(struct hifc_cmdq *cmdq, enum hifc_ack_type ack_type, + enum hifc_mod_type mod, u8 cmd, + struct hifc_cmd_buf *buf_in) +{ + struct hifc_wq *wq = cmdq->wq; + int wqe_size = cmdq_wqe_size(WQE_LCMD_TYPE); + u16 curr_prod_idx, next_prod_idx, num_wqebbs; + struct hifc_cmdq_wqe *curr_wqe, wqe; + int wrapped; + + num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq); + + spin_lock_bh(&cmdq->cmdq_lock); + + /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow*/ + curr_wqe = hifc_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx); + if (!curr_wqe) { + spin_unlock_bh(&cmdq->cmdq_lock); + return -EBUSY; + } + + memset(&wqe, 0, sizeof(wqe)); + + wrapped = cmdq->wrapped; + next_prod_idx = curr_prod_idx + num_wqebbs; + if (next_prod_idx >= cmdq->wq->q_depth) { + cmdq->wrapped = !cmdq->wrapped; + next_prod_idx -= cmdq->wq->q_depth; + } + + cmdq_set_lcmd_wqe(&wqe, ASYNC_CMD, buf_in, NULL, wrapped, + ack_type, mod, cmd, curr_prod_idx); + + /* The data that is written to HW should be in Big Endian Format */ + hifc_cpu_to_be32(&wqe, wqe_size); + + cmdq_wqe_fill(curr_wqe, &wqe); + + cmdq->cmd_infos[curr_prod_idx].cmd_type = HIFC_CMD_TYPE_ASYNC; + + cmdq_set_db(cmdq, HIFC_CMDQ_ASYNC, next_prod_idx); + + spin_unlock_bh(&cmdq->cmdq_lock); + + return 0; +} + +static int cmdq_set_arm_bit(struct hifc_cmdq *cmdq, void *buf_in, u16 in_size) +{ + struct hifc_wq *wq = cmdq->wq; + struct hifc_cmdq_wqe *curr_wqe, wqe; + u16 curr_prod_idx, next_prod_idx, num_wqebbs; + int wrapped, wqe_size = cmdq_wqe_size(WQE_SCMD_TYPE); + + num_wqebbs = 
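+ /* the inline (scmd) arm wqe is 64 bytes, so it fits in a single wqebb here */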
WQE_NUM_WQEBBS(wqe_size, wq); + + /* Keep wrapped and doorbell index correct. bh - for tasklet(ceq) */ + spin_lock_bh(&cmdq->cmdq_lock); + + /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow*/ + curr_wqe = hifc_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx); + if (!curr_wqe) { + spin_unlock_bh(&cmdq->cmdq_lock); + sdk_err(cmdq->hwdev->dev_hdl, "Can not get avalible wqebb setting arm\n"); + return -EBUSY; + } + + memset(&wqe, 0, sizeof(wqe)); + + wrapped = cmdq->wrapped; + + next_prod_idx = curr_prod_idx + num_wqebbs; + if (next_prod_idx >= wq->q_depth) { + cmdq->wrapped = !cmdq->wrapped; + next_prod_idx -= wq->q_depth; + } + + cmdq_set_inline_wqe(&wqe, SYNC_CMD_DIRECT_RESP, buf_in, in_size, NULL, + wrapped, HIFC_ACK_TYPE_CMDQ, HIFC_MOD_COMM, + CMDQ_SET_ARM_CMD, curr_prod_idx); + + /* The data that is written to HW should be in Big Endian Format */ + hifc_cpu_to_be32(&wqe, wqe_size); + + /* cmdq wqe is not shadow, therefore wqe will be written to wq */ + cmdq_wqe_fill(curr_wqe, &wqe); + + cmdq->cmd_infos[curr_prod_idx].cmd_type = HIFC_CMD_TYPE_SET_ARM; + + cmdq_set_db(cmdq, cmdq->cmdq_type, next_prod_idx); + + spin_unlock_bh(&cmdq->cmdq_lock); + + return 0; +} + +static int cmdq_params_valid(void *hwdev, struct hifc_cmd_buf *buf_in) +{ + if (!buf_in || !hwdev) { + pr_err("Invalid CMDQ buffer addr\n"); + return -EINVAL; + } + + if (!buf_in->size || buf_in->size > HIFC_CMDQ_MAX_DATA_SIZE) { + pr_err("Invalid CMDQ buffer size: 0x%x\n", buf_in->size); + return -EINVAL; + } + + return 0; +} + +#define WAIT_CMDQ_ENABLE_TIMEOUT 300 + +static int wait_cmdqs_enable(struct hifc_cmdqs *cmdqs) +{ + unsigned long end; + + end = jiffies + msecs_to_jiffies(WAIT_CMDQ_ENABLE_TIMEOUT); + do { + if (cmdqs->status & HIFC_CMDQ_ENABLE) + return 0; + } while (time_before(jiffies, end) && cmdqs->hwdev->chip_present_flag && + !cmdqs->disable_flag); + + cmdqs->disable_flag = 1; + + return -EBUSY; +} + +int hifc_cmdq_direct_resp(void *hwdev, enum hifc_ack_type ack_type, + enum hifc_mod_type mod, u8 cmd, + struct hifc_cmd_buf *buf_in, u64 *out_param, + u32 timeout) +{ + struct hifc_cmdqs *cmdqs; + int err = cmdq_params_valid(hwdev, buf_in); + + if (err) { + pr_err("Invalid CMDQ parameters\n"); + return err; + } + + cmdqs = ((struct hifc_hwdev *)hwdev)->cmdqs; + + if (!(((struct hifc_hwdev *)hwdev)->chip_present_flag) || + !hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_CMDQ_INITED)) + return -EPERM; + + err = wait_cmdqs_enable(cmdqs); + if (err) { + sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disable\n"); + return err; + } + + err = cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HIFC_CMDQ_SYNC], ack_type, + mod, cmd, buf_in, out_param, timeout); + if (!(((struct hifc_hwdev *)hwdev)->chip_present_flag)) + return -ETIMEDOUT; + else + return err; +} + +int hifc_cmdq_detail_resp(void *hwdev, + enum hifc_ack_type ack_type, + enum hifc_mod_type mod, u8 cmd, + struct hifc_cmd_buf *buf_in, + struct hifc_cmd_buf *buf_out, + u32 timeout) +{ + struct hifc_cmdqs *cmdqs; + int err = cmdq_params_valid(hwdev, buf_in); + + if (err) + return err; + + cmdqs = ((struct hifc_hwdev *)hwdev)->cmdqs; + + if (!(((struct hifc_hwdev *)hwdev)->chip_present_flag) || + !hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_CMDQ_INITED)) + return -EPERM; + + err = wait_cmdqs_enable(cmdqs); + if (err) { + sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disable\n"); + return err; + } + + err = cmdq_sync_cmd_detail_resp(&cmdqs->cmdq[HIFC_CMDQ_SYNC], ack_type, + mod, cmd, buf_in, buf_out, timeout); + if (!(((struct hifc_hwdev *)hwdev)->chip_present_flag)) + return 
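+ /* the chip disappeared while we were waiting: report a timeout instead of the cmdq result */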
-ETIMEDOUT; + else + return err; +} + +int hifc_cmdq_async(void *hwdev, enum hifc_ack_type ack_type, + enum hifc_mod_type mod, u8 cmd, + struct hifc_cmd_buf *buf_in) +{ + struct hifc_cmdqs *cmdqs; + int err = cmdq_params_valid(hwdev, buf_in); + + if (err) + return err; + + cmdqs = ((struct hifc_hwdev *)hwdev)->cmdqs; + + if (!(((struct hifc_hwdev *)hwdev)->chip_present_flag) || + !hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_CMDQ_INITED)) + return -EPERM; + + err = wait_cmdqs_enable(cmdqs); + if (err) { + sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disable\n"); + return err; + } + + return cmdq_async_cmd(&cmdqs->cmdq[HIFC_CMDQ_ASYNC], ack_type, mod, + cmd, buf_in); +} + +int hifc_set_arm_bit(void *hwdev, enum hifc_set_arm_type q_type, u16 q_id) +{ + struct hifc_cmdqs *cmdqs; + struct hifc_cmdq *cmdq; + struct hifc_cmdq_arm_bit arm_bit; + enum hifc_cmdq_type cmdq_type = HIFC_CMDQ_SYNC; + u16 in_size; + int err; + + if (!hwdev) + return -EINVAL; + + if (!(((struct hifc_hwdev *)hwdev)->chip_present_flag) || + !hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_CMDQ_INITED)) + return -EPERM; + + cmdqs = ((struct hifc_hwdev *)hwdev)->cmdqs; + + if (!(cmdqs->status & HIFC_CMDQ_ENABLE)) + return -EBUSY; + + if (q_type == HIFC_SET_ARM_CMDQ) { + if (q_id >= HIFC_MAX_CMDQ_TYPES) + return -EFAULT; + + cmdq_type = q_id; + } + /* sq is using interrupt now, so we only need to set arm bit for cmdq, + * remove comment below if need to set sq arm bit + * else + * cmdq_type = HIFC_CMDQ_SYNC; + */ + + cmdq = &cmdqs->cmdq[cmdq_type]; + + arm_bit.q_type = q_type; + arm_bit.q_id = q_id; + in_size = sizeof(arm_bit); + + err = cmdq_set_arm_bit(cmdq, &arm_bit, in_size); + if (err) { + sdk_err(cmdqs->hwdev->dev_hdl, + "Failed to set arm for q_type: %d, qid %d\n", + q_type, q_id); + return err; + } + + return 0; +} + +static void clear_wqe_complete_bit(struct hifc_cmdq *cmdq, + struct hifc_cmdq_wqe *wqe, u16 ci) +{ + struct hifc_cmdq_wqe_lcmd *wqe_lcmd; + struct hifc_cmdq_inline_wqe *inline_wqe; + struct hifc_cmdq_wqe_scmd *wqe_scmd; + struct hifc_ctrl *ctrl; + u32 header_info = be32_to_cpu(WQE_HEADER(wqe)->header_info); + int buf_len = CMDQ_WQE_HEADER_GET(header_info, BUFDESC_LEN); + int wqe_size = cmdq_get_wqe_size(buf_len); + u16 num_wqebbs; + + if (wqe_size == WQE_LCMD_SIZE) { + wqe_lcmd = &wqe->wqe_lcmd; + ctrl = &wqe_lcmd->ctrl; + } else { + inline_wqe = &wqe->inline_wqe; + wqe_scmd = &inline_wqe->wqe_scmd; + ctrl = &wqe_scmd->ctrl; + } + + /* clear HW busy bit */ + ctrl->ctrl_info = 0; + cmdq->cmd_infos[ci].cmd_type = HIFC_CMD_TYPE_NONE; + + wmb(); /* verify wqe is clear */ + + num_wqebbs = WQE_NUM_WQEBBS(wqe_size, cmdq->wq); + hifc_put_wqe(cmdq->wq, num_wqebbs); +} + +static void cmdq_sync_cmd_handler(struct hifc_cmdq *cmdq, + struct hifc_cmdq_wqe *wqe, u16 cons_idx) +{ + u16 prod_idx = cons_idx; + + spin_lock(&cmdq->cmdq_lock); + + cmdq_update_cmd_status(cmdq, prod_idx, wqe); + + if (cmdq->cmd_infos[prod_idx].cmpt_code) { + *cmdq->cmd_infos[prod_idx].cmpt_code = + CMDQ_COMPLETE_CMPT_CODE; + cmdq->cmd_infos[prod_idx].cmpt_code = NULL; + } + + /* make sure cmpt_code operation before done operation */ + smp_rmb(); + + if (cmdq->cmd_infos[prod_idx].done) { + complete(cmdq->cmd_infos[prod_idx].done); + cmdq->cmd_infos[prod_idx].done = NULL; + } + + spin_unlock(&cmdq->cmdq_lock); + + clear_wqe_complete_bit(cmdq, wqe, cons_idx); +} + +static void cmdq_async_cmd_handler(struct hifc_hwdev *hwdev, + struct hifc_cmdq *cmdq, + struct hifc_cmdq_wqe *wqe, u16 ci) +{ + u64 buf = wqe->wqe_lcmd.buf_desc.saved_async_buf; + int addr_sz = 
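+ /* the saved buffer pointer was byte-swapped together with the rest of the wqe, so swap these 8 bytes back before freeing */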
sizeof(u64); + + hifc_be32_to_cpu((void *)&buf, addr_sz); + if (buf) + hifc_free_cmd_buf(hwdev, (struct hifc_cmd_buf *)buf); + + clear_wqe_complete_bit(cmdq, wqe, ci); +} + +static int cmdq_arm_ceq_handler(struct hifc_cmdq *cmdq, + struct hifc_cmdq_wqe *wqe, u16 ci) +{ + struct hifc_cmdq_inline_wqe *inline_wqe = &wqe->inline_wqe; + struct hifc_cmdq_wqe_scmd *wqe_scmd = &inline_wqe->wqe_scmd; + struct hifc_ctrl *ctrl = &wqe_scmd->ctrl; + u32 ctrl_info = be32_to_cpu((ctrl)->ctrl_info); + + if (!WQE_COMPLETED(ctrl_info)) + return -EBUSY; + + clear_wqe_complete_bit(cmdq, wqe, ci); + + return 0; +} + +#define HIFC_CMDQ_WQE_HEAD_LEN 32 +static void hifc_dump_cmdq_wqe_head(struct hifc_hwdev *hwdev, + struct hifc_cmdq_wqe *wqe) +{ + u32 i; + u32 *data = (u32 *)wqe; + + for (i = 0; i < (HIFC_CMDQ_WQE_HEAD_LEN / sizeof(u32)); i += 4) { + sdk_info(hwdev->dev_hdl, "wqe data: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", + data[i], data[i + 1], data[i + 2], + data[i + 3]);/*lint !e679*/ + } +} + +#define CMDQ_CMD_TYPE_TIMEOUT(cmd_type) \ + ((cmd_type) == HIFC_CMD_TYPE_TIMEOUT || \ + (cmd_type) == HIFC_CMD_TYPE_FAKE_TIMEOUT) + +static inline void cmdq_response_handle(struct hifc_hwdev *hwdev, + struct hifc_cmdq *cmdq, + struct hifc_cmdq_wqe *wqe, + enum hifc_cmdq_type cmdq_type, u16 ci) +{ + if (cmdq_type == HIFC_CMDQ_ASYNC) + cmdq_async_cmd_handler(hwdev, cmdq, wqe, ci); + else + cmdq_sync_cmd_handler(cmdq, wqe, ci); +} + +static inline void set_arm_bit(struct hifc_hwdev *hwdev, int set_arm, + enum hifc_cmdq_type cmdq_type) +{ + if (set_arm) + hifc_set_arm_bit(hwdev, HIFC_SET_ARM_CMDQ, cmdq_type); +} + +void hifc_cmdq_ceq_handler(void *handle, u32 ceqe_data) +{ + struct hifc_cmdqs *cmdqs = ((struct hifc_hwdev *)handle)->cmdqs; + enum hifc_cmdq_type cmdq_type = CEQE_CMDQ_GET(ceqe_data, TYPE); + struct hifc_cmdq *cmdq = &cmdqs->cmdq[cmdq_type]; + struct hifc_hwdev *hwdev = cmdqs->hwdev; + struct hifc_cmdq_wqe *wqe; + struct hifc_cmdq_wqe_lcmd *wqe_lcmd; + struct hifc_ctrl *ctrl; + struct hifc_cmdq_cmd_info *cmd_info; + u32 ctrl_info; + u16 ci; + int set_arm = 1; + + while ((wqe = hifc_read_wqe(cmdq->wq, 1, &ci)) != NULL) { + cmd_info = &cmdq->cmd_infos[ci]; + + if (cmd_info->cmd_type == HIFC_CMD_TYPE_NONE) { + set_arm = 1; + break; + } else if (CMDQ_CMD_TYPE_TIMEOUT(cmd_info->cmd_type)) { + if (cmd_info->cmd_type == HIFC_CMD_TYPE_TIMEOUT) { + sdk_info(hwdev->dev_hdl, "Cmdq timeout, q_id: %u, ci: %u\n", + cmdq_type, ci); + hifc_dump_cmdq_wqe_head(hwdev, wqe); + } + + set_arm = 1; + clear_wqe_complete_bit(cmdq, wqe, ci); + } else if (cmd_info->cmd_type == HIFC_CMD_TYPE_SET_ARM) { + /* arm_bit was set until here */ + set_arm = 0; + + if (cmdq_arm_ceq_handler(cmdq, wqe, ci)) + break; + } else { + set_arm = 1; + + /* only arm bit is using scmd wqe, the wqe is lcmd */ + wqe_lcmd = &wqe->wqe_lcmd; + ctrl = &wqe_lcmd->ctrl; + ctrl_info = be32_to_cpu((ctrl)->ctrl_info); + + if (!WQE_COMPLETED(ctrl_info)) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the cmdq wqe until we have + * verified the command has been processed and + * written back. 
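+ * The busy-bit check above tells us the wqe is done; dma_rmb() orders that read before the reads of the completion data below.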
+ */ + dma_rmb(); + + cmdq_response_handle(hwdev, cmdq, wqe, cmdq_type, ci); + } + } + + set_arm_bit(hwdev, set_arm, cmdq_type); +} + +static void cmdq_init_queue_ctxt(struct hifc_cmdq *cmdq, + struct hifc_cmdq_pages *cmdq_pages, + struct hifc_cmdq_ctxt *cmdq_ctxt) +{ + struct hifc_cmdqs *cmdqs = cmdq_to_cmdqs(cmdq); + struct hifc_hwdev *hwdev = cmdqs->hwdev; + struct hifc_wq *wq = cmdq->wq; + struct hifc_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info; + u64 wq_first_page_paddr, cmdq_first_block_paddr, pfn; + u16 start_ci = (u16)wq->cons_idx; + + /* The data in the HW is in Big Endian Format */ + wq_first_page_paddr = be64_to_cpu(*wq->block_vaddr); + + pfn = CMDQ_PFN(wq_first_page_paddr); + + ctxt_info->curr_wqe_page_pfn = + CMDQ_CTXT_PAGE_INFO_SET(1, HW_BUSY_BIT) | + CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) | + CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_ARM) | + CMDQ_CTXT_PAGE_INFO_SET(HIFC_CEQ_ID_CMDQ, EQ_ID) | + CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN); + + /* If only use one page, use 0-level CLA */ + if (cmdq->wq->num_q_pages != 1) { + cmdq_first_block_paddr = cmdq_pages->cmdq_page_paddr; + pfn = CMDQ_PFN(cmdq_first_block_paddr); + } + + ctxt_info->wq_block_pfn = CMDQ_CTXT_BLOCK_INFO_SET(start_ci, CI) | + CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN); + + cmdq_ctxt->func_idx = hifc_global_func_id_hw(hwdev); + cmdq_ctxt->ppf_idx = HIFC_HWIF_PPF_IDX(hwdev->hwif); + cmdq_ctxt->cmdq_id = cmdq->cmdq_type; +} + +static int init_cmdq(struct hifc_cmdq *cmdq, struct hifc_hwdev *hwdev, + struct hifc_wq *wq, enum hifc_cmdq_type q_type) +{ + void __iomem *db_base; + int err = 0; + + cmdq->wq = wq; + cmdq->cmdq_type = q_type; + cmdq->wrapped = 1; + cmdq->hwdev = hwdev; + + spin_lock_init(&cmdq->cmdq_lock); + + cmdq->cmd_infos = kcalloc(wq->q_depth, sizeof(*cmdq->cmd_infos), + GFP_KERNEL); + if (!cmdq->cmd_infos) { + err = -ENOMEM; + goto cmd_infos_err; + } + + err = hifc_alloc_db_addr(hwdev, &db_base, NULL); + if (err) + goto alloc_db_err; + + cmdq->db_base = (u8 *)db_base; + return 0; + +alloc_db_err: + kfree(cmdq->cmd_infos); + +cmd_infos_err: + + return err; +} + +static void free_cmdq(struct hifc_hwdev *hwdev, struct hifc_cmdq *cmdq) +{ + hifc_free_db_addr(hwdev, cmdq->db_base, NULL); + kfree(cmdq->cmd_infos); +} + +int hifc_set_cmdq_ctxts(struct hifc_hwdev *hwdev) +{ + struct hifc_cmdqs *cmdqs = hwdev->cmdqs; + struct hifc_cmdq_ctxt *cmdq_ctxt, cmdq_ctxt_out = {0}; + enum hifc_cmdq_type cmdq_type; + u16 in_size; + u16 out_size = sizeof(*cmdq_ctxt); + int err; + + cmdq_type = HIFC_CMDQ_SYNC; + for (; cmdq_type < HIFC_MAX_CMDQ_TYPES; cmdq_type++) { + cmdq_ctxt = &cmdqs->cmdq[cmdq_type].cmdq_ctxt; + cmdq_ctxt->func_idx = hifc_global_func_id_hw(hwdev); + in_size = sizeof(*cmdq_ctxt); + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_CMDQ_CTXT_SET, + cmdq_ctxt, in_size, + &cmdq_ctxt_out, &out_size, 0); + if (err || !out_size || cmdq_ctxt_out.status) { + sdk_err(hwdev->dev_hdl, "Failed to set cmdq ctxt, err: %d, status: 0x%x, out_size: 0x%x\n", + err, cmdq_ctxt_out.status, out_size); + return -EFAULT; + } + } + + cmdqs->status |= HIFC_CMDQ_ENABLE; + cmdqs->disable_flag = 0; + + return 0; +} + +void hifc_cmdq_flush_cmd(struct hifc_hwdev *hwdev, + struct hifc_cmdq *cmdq) +{ + struct hifc_cmdq_wqe *wqe; + struct hifc_cmdq_cmd_info *cmdq_info; + u16 ci, wqe_left, i; + u64 buf; + + spin_lock_bh(&cmdq->cmdq_lock); + wqe_left = cmdq->wq->q_depth - (u16)atomic_read(&cmdq->wq->delta); + ci = MASKED_WQE_IDX(cmdq->wq, cmdq->wq->cons_idx); + for (i = 0; i < wqe_left; i++, ci++) { + ci = 
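+ /* wrap the consumer index back into the queue before indexing cmd_infos[] */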
MASKED_WQE_IDX(cmdq->wq, ci); + cmdq_info = &cmdq->cmd_infos[ci]; + + if (cmdq_info->cmd_type == HIFC_CMD_TYPE_SET_ARM) + continue; + + if (cmdq->cmdq_type == HIFC_CMDQ_ASYNC) { + wqe = hifc_get_wqebb_addr(cmdq->wq, ci); + buf = wqe->wqe_lcmd.buf_desc.saved_async_buf; + wqe->wqe_lcmd.buf_desc.saved_async_buf = 0; + + hifc_be32_to_cpu((void *)&buf, sizeof(u64)); + if (buf) + hifc_free_cmd_buf(hwdev, + (struct hifc_cmd_buf *)buf); + } else { + if (cmdq_info->done) { + complete(cmdq_info->done); + cmdq_info->done = NULL; + cmdq_info->cmpt_code = NULL; + cmdq_info->direct_resp = NULL; + cmdq_info->errcode = NULL; + } + } + } + + spin_unlock_bh(&cmdq->cmdq_lock); +} + +int hifc_reinit_cmdq_ctxts(struct hifc_hwdev *hwdev) +{ + struct hifc_cmdqs *cmdqs = hwdev->cmdqs; + enum hifc_cmdq_type cmdq_type; + + cmdq_type = HIFC_CMDQ_SYNC; + for (; cmdq_type < HIFC_MAX_CMDQ_TYPES; cmdq_type++) { + hifc_cmdq_flush_cmd(hwdev, &cmdqs->cmdq[cmdq_type]); + cmdqs->cmdq[cmdq_type].wrapped = 1; + hifc_wq_wqe_pg_clear(cmdqs->cmdq[cmdq_type].wq); + } + + return hifc_set_cmdq_ctxts(hwdev); +} + +int hifc_cmdqs_init(struct hifc_hwdev *hwdev) +{ + struct hifc_cmdqs *cmdqs; + struct hifc_cmdq_ctxt *cmdq_ctxt; + enum hifc_cmdq_type type, cmdq_type; + size_t saved_wqs_size; + u32 max_wqe_size; + int err; + + cmdqs = kzalloc(sizeof(*cmdqs), GFP_KERNEL); + if (!cmdqs) + return -ENOMEM; + + hwdev->cmdqs = cmdqs; + cmdqs->hwdev = hwdev; + + saved_wqs_size = HIFC_MAX_CMDQ_TYPES * sizeof(struct hifc_wq); + cmdqs->saved_wqs = kzalloc(saved_wqs_size, GFP_KERNEL); + if (!cmdqs->saved_wqs) { + sdk_err(hwdev->dev_hdl, "Failed to allocate saved wqs\n"); + err = -ENOMEM; + goto alloc_wqs_err; + } + + cmdqs->cmd_buf_pool = dma_pool_create("hifc_cmdq", hwdev->dev_hdl, + HIFC_CMDQ_BUF_SIZE, + HIFC_CMDQ_BUF_SIZE, 0ULL); + if (!cmdqs->cmd_buf_pool) { + sdk_err(hwdev->dev_hdl, "Failed to create cmdq buffer pool\n"); + err = -ENOMEM; + goto pool_create_err; + } + + max_wqe_size = (u32)cmdq_wqe_size(WQE_LCMD_TYPE); + err = hifc_cmdq_alloc(&cmdqs->cmdq_pages, cmdqs->saved_wqs, + hwdev->dev_hdl, HIFC_MAX_CMDQ_TYPES, + hwdev->wq_page_size, CMDQ_WQEBB_SIZE, + HIFC_CMDQ_DEPTH, max_wqe_size); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to allocate cmdq\n"); + goto cmdq_alloc_err; + } + + cmdq_type = HIFC_CMDQ_SYNC; + for (; cmdq_type < HIFC_MAX_CMDQ_TYPES; cmdq_type++) { + err = init_cmdq(&cmdqs->cmdq[cmdq_type], hwdev, + &cmdqs->saved_wqs[cmdq_type], cmdq_type); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to initialize cmdq type :%d\n", + cmdq_type); + goto init_cmdq_err; + } + + cmdq_ctxt = &cmdqs->cmdq[cmdq_type].cmdq_ctxt; + cmdq_init_queue_ctxt(&cmdqs->cmdq[cmdq_type], + &cmdqs->cmdq_pages, cmdq_ctxt); + } + + err = hifc_set_cmdq_ctxts(hwdev); + if (err) + goto init_cmdq_err; + + return 0; + +init_cmdq_err: + type = HIFC_CMDQ_SYNC; + for (; type < cmdq_type; type++) + free_cmdq(hwdev, &cmdqs->cmdq[type]); + + hifc_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs, + HIFC_MAX_CMDQ_TYPES); + +cmdq_alloc_err: + dma_pool_destroy(cmdqs->cmd_buf_pool); + +pool_create_err: + kfree(cmdqs->saved_wqs); + +alloc_wqs_err: + kfree(cmdqs); + + return err; +} + +void hifc_cmdqs_free(struct hifc_hwdev *hwdev) +{ + struct hifc_cmdqs *cmdqs = hwdev->cmdqs; + enum hifc_cmdq_type cmdq_type = HIFC_CMDQ_SYNC; + + cmdqs->status &= ~HIFC_CMDQ_ENABLE; + + for (; cmdq_type < HIFC_MAX_CMDQ_TYPES; cmdq_type++) { + hifc_cmdq_flush_cmd(hwdev, &cmdqs->cmdq[cmdq_type]); + free_cmdq(cmdqs->hwdev, &cmdqs->cmdq[cmdq_type]); + } + + hifc_cmdq_free(&cmdqs->cmdq_pages, 
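+ /* wq pages for all cmdq types were allocated in one hifc_cmdq_alloc() call, so free them in one call */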
cmdqs->saved_wqs, + HIFC_MAX_CMDQ_TYPES); + + dma_pool_destroy(cmdqs->cmd_buf_pool); + + kfree(cmdqs->saved_wqs); + + kfree(cmdqs); +} diff --git a/drivers/scsi/huawei/hifc/hifc_cmdq.h b/drivers/scsi/huawei/hifc/hifc_cmdq.h new file mode 100644 index 000000000000..cb2ac81c5edc --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_cmdq.h @@ -0,0 +1,210 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef HIFC_CMDQ_H_ +#define HIFC_CMDQ_H_ + +#define HIFC_DB_OFF 0x00000800 + +#define HIFC_SCMD_DATA_LEN 16 + +#define HIFC_CMDQ_DEPTH 4096 + +#define HIFC_CMDQ_BUF_SIZE 2048U +#define HIFC_CMDQ_BUF_HW_RSVD 8 +#define HIFC_CMDQ_MAX_DATA_SIZE \ + (HIFC_CMDQ_BUF_SIZE - HIFC_CMDQ_BUF_HW_RSVD) +#define WQ_PAGE_PFN_SHIFT 12 +#define WQ_BLOCK_PFN_SHIFT 9 + +#define WQ_PAGE_PFN(page_addr) ((page_addr) >> WQ_PAGE_PFN_SHIFT) +#define WQ_BLOCK_PFN(page_addr) ((page_addr) >> WQ_BLOCK_PFN_SHIFT) + +enum hifc_cmdq_type { + HIFC_CMDQ_SYNC, + HIFC_CMDQ_ASYNC, + HIFC_MAX_CMDQ_TYPES, +}; + +enum hifc_db_src_type { + HIFC_DB_SRC_CMDQ_TYPE, + HIFC_DB_SRC_L2NIC_SQ_TYPE, +}; + +enum hifc_cmdq_db_type { + HIFC_DB_SQ_RQ_TYPE, + HIFC_DB_CMDQ_TYPE, +}; + +/* CMDQ WQE CTRLS */ +struct hifc_cmdq_header { + u32 header_info; + u32 saved_data; +}; + +struct hifc_scmd_bufdesc { + u32 buf_len; + u32 rsvd; + u8 data[HIFC_SCMD_DATA_LEN]; +}; + +struct hifc_lcmd_bufdesc { + struct hifc_sge sge; + u32 rsvd1; + u64 saved_async_buf; + u64 rsvd3; +}; + +struct hifc_cmdq_db { + u32 db_info; + u32 rsvd; +}; + +struct hifc_status { + u32 status_info; +}; + +struct hifc_ctrl { + u32 ctrl_info; +}; + +struct hifc_sge_resp { + struct hifc_sge sge; + u32 rsvd; +}; + +struct hifc_cmdq_completion { + /* HW Format */ + union { + struct hifc_sge_resp sge_resp; + u64 direct_resp; + }; +}; + +struct hifc_cmdq_wqe_scmd { + struct hifc_cmdq_header header; + struct hifc_cmdq_db db; + struct hifc_status status; + struct hifc_ctrl ctrl; + struct hifc_cmdq_completion completion; + struct hifc_scmd_bufdesc buf_desc; +}; + +struct hifc_cmdq_wqe_lcmd { + struct hifc_cmdq_header header; + struct hifc_status status; + struct hifc_ctrl ctrl; + struct hifc_cmdq_completion completion; + struct hifc_lcmd_bufdesc buf_desc; +}; + +struct hifc_cmdq_inline_wqe { + struct hifc_cmdq_wqe_scmd wqe_scmd; +}; + +struct hifc_cmdq_wqe { + /* HW Format */ + union { + struct hifc_cmdq_inline_wqe inline_wqe; + struct hifc_cmdq_wqe_lcmd wqe_lcmd; + }; +}; + +struct hifc_cmdq_arm_bit { + u32 q_type; + u32 q_id; +}; + +struct hifc_cmdq_ctxt_info { + u64 curr_wqe_page_pfn; + u64 wq_block_pfn; +}; + +struct hifc_cmdq_ctxt { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 cmdq_id; + u8 ppf_idx; + + u8 rsvd1[4]; + + struct hifc_cmdq_ctxt_info ctxt_info; +}; + +enum hifc_cmdq_status { + HIFC_CMDQ_ENABLE = BIT(0), +}; + +enum hifc_cmdq_cmd_type { + HIFC_CMD_TYPE_NONE, + HIFC_CMD_TYPE_SET_ARM, + HIFC_CMD_TYPE_DIRECT_RESP, + HIFC_CMD_TYPE_SGE_RESP, + HIFC_CMD_TYPE_ASYNC, + HIFC_CMD_TYPE_TIMEOUT, + HIFC_CMD_TYPE_FAKE_TIMEOUT, +}; + +struct hifc_cmdq_cmd_info { + enum hifc_cmdq_cmd_type cmd_type; + + struct completion *done; + int *errcode; + int *cmpt_code; + u64 *direct_resp; + u64 cmdq_msg_id; +}; + +struct hifc_cmdq { + struct hifc_wq *wq; + + enum hifc_cmdq_type cmdq_type; + int wrapped; + + /* spinlock for send cmdq commands */ + spinlock_t cmdq_lock; + + /* doorbell area */ + u8 __iomem *db_base; + + struct hifc_cmdq_ctxt cmdq_ctxt; + + struct hifc_cmdq_cmd_info 
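+ /* one entry per wqebb, indexed by the producer index of the command */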
*cmd_infos; + + struct hifc_hwdev *hwdev; +}; + +struct hifc_cmdqs { + struct hifc_hwdev *hwdev; + + struct pci_pool *cmd_buf_pool; + + struct hifc_wq *saved_wqs; + + struct hifc_cmdq_pages cmdq_pages; + struct hifc_cmdq cmdq[HIFC_MAX_CMDQ_TYPES]; + + u32 status; + u32 disable_flag; +}; + +void hifc_cmdq_ceq_handler(void *hwdev, u32 ceqe_data); + +int hifc_reinit_cmdq_ctxts(struct hifc_hwdev *hwdev); + +bool hifc_cmdq_idle(struct hifc_cmdq *cmdq); + +int hifc_cmdqs_init(struct hifc_hwdev *hwdev); + +void hifc_cmdqs_free(struct hifc_hwdev *hwdev); + +void hifc_cmdq_flush_cmd(struct hifc_hwdev *hwdev, + struct hifc_cmdq *cmdq); + +#endif diff --git a/drivers/scsi/huawei/hifc/hifc_cqm_main.c b/drivers/scsi/huawei/hifc/hifc_cqm_main.c new file mode 100644 index 000000000000..4cd048f1e662 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_cqm_main.c @@ -0,0 +1,694 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#include <linux/sched.h> +#include <linux/pci.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/vmalloc.h> + +#include "hifc_knl_adp.h" +#include "hifc_hw.h" +#include "hifc_hwdev.h" +#include "hifc_hwif.h" +#include "hifc_api_cmd.h" +#include "hifc_mgmt.h" +#include "hifc_cfg.h" +#include "hifc_cqm_object.h" +#include "hifc_cqm_main.h" + +#define GET_MAX(a, b) (((a) > (b)) ? (a) : (b)) +#define GET_MIN(a, b) (((a) < (b)) ? (a) : (b)) + +static void cqm_capability_init_check_ppf(void *ex_handle, + u32 *total_function_num) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + struct service_cap *service_capability = &handle->cfg_mgmt->svc_cap; + struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *) + (handle->cqm_hdl); + + if (cqm_handle->func_attribute.func_type == CQM_PPF) { + *total_function_num = service_capability->host_total_function; + cqm_handle->func_capability.timer_enable = + service_capability->timer_en; + + cqm_info(handle->dev_hdl, "Cap init: total function num 0x%x\n", + *total_function_num); + cqm_info(handle->dev_hdl, "Cap init: timer_enable %d (1: enable; 0: disable)\n", + cqm_handle->func_capability.timer_enable); + } +} + +void cqm_test_mode_init(struct cqm_handle_s *cqm_handle, + struct service_cap *service_capability) +{ + cqm_handle->func_capability.xid_alloc_mode = + service_capability->test_xid_alloc_mode; + cqm_handle->func_capability.gpa_check_enable = + service_capability->test_gpa_check_enable; +} + +static s32 cqm_service_capability_init_for_each( + struct cqm_handle_s *cqm_handle, + struct service_cap *service_capability) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)cqm_handle->ex_handle; + + cqm_info(handle->dev_hdl, "Cap init: fc is valid\n"); + cqm_handle->func_capability.hash_number += + service_capability->fc_cap.dev_fc_cap.max_parent_qpc_num; + cqm_handle->func_capability.hash_basic_size = CQM_HASH_BUCKET_SIZE_64; + cqm_handle->func_capability.qpc_number += + service_capability->fc_cap.dev_fc_cap.max_parent_qpc_num; + cqm_handle->func_capability.qpc_basic_size = + GET_MAX(service_capability->fc_cap.parent_qpc_size, + cqm_handle->func_capability.qpc_basic_size); + cqm_handle->func_capability.qpc_alloc_static = true; + cqm_handle->func_capability.scqc_number += + service_capability->fc_cap.dev_fc_cap.scq_num; + cqm_handle->func_capability.scqc_basic_size = + GET_MAX(service_capability->fc_cap.scqc_size, + cqm_handle->func_capability.scqc_basic_size); + cqm_handle->func_capability.srqc_number += + 
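+ /* counters accumulate (+=), so FC adds on top of whatever a prior service contributed */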
service_capability->fc_cap.dev_fc_cap.srq_num; + cqm_handle->func_capability.srqc_basic_size = + GET_MAX(service_capability->fc_cap.srqc_size, + cqm_handle->func_capability.srqc_basic_size); + cqm_handle->func_capability.lun_number = CQM_LUN_FC_NUM; + cqm_handle->func_capability.lun_basic_size = CQM_LUN_SIZE_8; + cqm_handle->func_capability.taskmap_number = CQM_TASKMAP_FC_NUM; + cqm_handle->func_capability.taskmap_basic_size = PAGE_SIZE; + cqm_handle->func_capability.childc_number += + service_capability->fc_cap.dev_fc_cap.max_child_qpc_num; + cqm_handle->func_capability.childc_basic_size = + GET_MAX(service_capability->fc_cap.child_qpc_size, + cqm_handle->func_capability.childc_basic_size); + cqm_handle->func_capability.pagesize_reorder = CQM_FC_PAGESIZE_ORDER; + + return CQM_SUCCESS; +} + +s32 cqm_service_capability_init(struct cqm_handle_s *cqm_handle, + struct service_cap *service_capability) +{ + cqm_handle->service.has_register = false; + cqm_handle->service.buf_order = 0; + + if (cqm_service_capability_init_for_each( + cqm_handle, + service_capability) == CQM_FAIL) + return CQM_FAIL; + + return CQM_SUCCESS; +} + +/** + * cqm_capability_init - Initialize capability of cqm function and service, + * need to read information from the configuration management module + * @ex_handle: handle of hwdev + */ +s32 cqm_capability_init(void *ex_handle) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + struct service_cap *service_capability = &handle->cfg_mgmt->svc_cap; + struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *) + (handle->cqm_hdl); + u32 total_function_num = 0; + int err = 0; + + cqm_capability_init_check_ppf(ex_handle, &total_function_num); + + cqm_handle->func_capability.flow_table_based_conn_number = + service_capability->max_connect_num; + cqm_handle->func_capability.flow_table_based_conn_cache_number = + service_capability->max_stick2cache_num; + cqm_info(handle->dev_hdl, "Cap init: cfg max_conn_num 0x%x, max_cache_conn_num 0x%x\n", + cqm_handle->func_capability.flow_table_based_conn_number, + cqm_handle->func_capability.flow_table_based_conn_cache_number); + + cqm_handle->func_capability.qpc_reserved = 0; + cqm_handle->func_capability.mpt_reserved = 0; + cqm_handle->func_capability.qpc_alloc_static = false; + cqm_handle->func_capability.scqc_alloc_static = false; + + cqm_handle->func_capability.l3i_number = CQM_L3I_COMM_NUM; + cqm_handle->func_capability.l3i_basic_size = CQM_L3I_SIZE_8; + + cqm_handle->func_capability.timer_number = CQM_TIMER_ALIGN_SCALE_NUM * + total_function_num; + cqm_handle->func_capability.timer_basic_size = CQM_TIMER_SIZE_32; + + if (cqm_service_capability_init(cqm_handle, service_capability) == + CQM_FAIL) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_service_capability_init)); + err = CQM_FAIL; + goto out; + } + + cqm_test_mode_init(cqm_handle, service_capability); + + cqm_info(handle->dev_hdl, "Cap init: pagesize_reorder %d\n", + cqm_handle->func_capability.pagesize_reorder); + cqm_info(handle->dev_hdl, "Cap init: xid_alloc_mode %d, gpa_check_enable %d\n", + cqm_handle->func_capability.xid_alloc_mode, + cqm_handle->func_capability.gpa_check_enable); + cqm_info(handle->dev_hdl, "Cap init: qpc_alloc_mode %d, scqc_alloc_mode %d\n", + cqm_handle->func_capability.qpc_alloc_static, + cqm_handle->func_capability.scqc_alloc_static); + cqm_info(handle->dev_hdl, "Cap init: hash_number 0x%x\n", + cqm_handle->func_capability.hash_number); + cqm_info(handle->dev_hdl, "Cap init: qpc_number 0x%x, qpc_reserved 0x%x\n", + 
cqm_handle->func_capability.qpc_number, + cqm_handle->func_capability.qpc_reserved); + cqm_info(handle->dev_hdl, "Cap init: scqc_number 0x%x scqc_reserved 0x%x\n", + cqm_handle->func_capability.scqc_number, + cqm_handle->func_capability.scq_reserved); + cqm_info(handle->dev_hdl, "Cap init: srqc_number 0x%x\n", + cqm_handle->func_capability.srqc_number); + cqm_info(handle->dev_hdl, "Cap init: mpt_number 0x%x, mpt_reserved 0x%x\n", + cqm_handle->func_capability.mpt_number, + cqm_handle->func_capability.mpt_reserved); + cqm_info(handle->dev_hdl, "Cap init: gid_number 0x%x, lun_number 0x%x\n", + cqm_handle->func_capability.gid_number, + cqm_handle->func_capability.lun_number); + cqm_info(handle->dev_hdl, "Cap init: taskmap_number 0x%x, l3i_number 0x%x\n", + cqm_handle->func_capability.taskmap_number, + cqm_handle->func_capability.l3i_number); + cqm_info(handle->dev_hdl, "Cap init: timer_number 0x%x\n", + cqm_handle->func_capability.timer_number); + cqm_info(handle->dev_hdl, "Cap init: xid2cid_number 0x%x, reorder_number 0x%x\n", + cqm_handle->func_capability.xid2cid_number, + cqm_handle->func_capability.reorder_number); + + return CQM_SUCCESS; + +out: + if (cqm_handle->func_attribute.func_type == CQM_PPF) + cqm_handle->func_capability.timer_enable = 0; + + return err; +} + +/** + * cqm_init - Initialize cqm + * @ex_handle: handle of hwdev + */ +s32 cqm_init(void *ex_handle) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + struct cqm_handle_s *cqm_handle = NULL; + s32 ret = CQM_FAIL; + + CQM_PTR_CHECK_RET(ex_handle, return CQM_FAIL, CQM_PTR_NULL(ex_handle)); + + cqm_handle = (struct cqm_handle_s *)kmalloc(sizeof(struct cqm_handle_s), + GFP_KERNEL | __GFP_ZERO); + CQM_PTR_CHECK_RET(cqm_handle, return CQM_FAIL, + CQM_ALLOC_FAIL(cqm_handle)); + /* Clear memory to prevent other systems' memory from being cleared */ + memset(cqm_handle, 0, sizeof(struct cqm_handle_s)); + + cqm_handle->ex_handle = handle; + cqm_handle->dev = (struct pci_dev *)(handle->pcidev_hdl); + + handle->cqm_hdl = (void *)cqm_handle; + + /* Clear statistics */ + memset(&handle->hw_stats.cqm_stats, 0, sizeof(struct hifc_cqm_stats)); + + /* Read information of vf or pf */ + cqm_handle->func_attribute = handle->hwif->attr; + cqm_info(handle->dev_hdl, "Func init: function type %d\n", + cqm_handle->func_attribute.func_type); + + /* Read ability from configuration management module */ + ret = cqm_capability_init(ex_handle); + if (ret == CQM_FAIL) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_capability_init)); + goto err1; + } + + /* Initialize entries of memory table such as BAT/CLA/bitmap */ + if (cqm_mem_init(ex_handle) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_mem_init)); + goto err1; + } + + /* Initialize event callback */ + if (cqm_event_init(ex_handle) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_event_init)); + goto err2; + } + + /* Initialize doorbell */ + if (cqm_db_init(ex_handle) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_db_init)); + goto err3; + } + + /* The timer bitmap is set directly from the beginning through CQM, + * no longer set/clear the bitmap through ifconfig up/down + */ + if (hifc_func_tmr_bitmap_set(ex_handle, 1) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, "Timer start: enable timer bitmap failed\n"); + goto err5; + } + + return CQM_SUCCESS; + +err5: + cqm_db_uninit(ex_handle); +err3: + cqm_event_uninit(ex_handle); +err2: + cqm_mem_uninit(ex_handle); +err1: + handle->cqm_hdl = NULL; + kfree(cqm_handle); + return 
CQM_FAIL; +} + +/** + * cqm_uninit - Deinitialize the cqm, and is called once removing a function + * @ex_handle: handle of hwdev + */ +void cqm_uninit(void *ex_handle) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + struct cqm_handle_s *cqm_handle = NULL; + s32 ret = CQM_FAIL; + + CQM_PTR_CHECK_NO_RET(ex_handle, CQM_PTR_NULL(ex_handle), return); + + cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl); + CQM_PTR_CHECK_NO_RET(cqm_handle, CQM_PTR_NULL(cqm_handle), return); + + /* The timer bitmap is set directly from the beginning through CQM, + * no longer set/clear the bitmap through ifconfig up/down + */ + cqm_info(handle->dev_hdl, "Timer stop: disable timer\n"); + if (hifc_func_tmr_bitmap_set(ex_handle, 0) != CQM_SUCCESS) + cqm_err(handle->dev_hdl, "Timer stop: disable timer bitmap failed\n"); + + /* Stopping timer, release the resource + * after a delay of one or two milliseconds + */ + if ((cqm_handle->func_attribute.func_type == CQM_PPF) && + (cqm_handle->func_capability.timer_enable == CQM_TIMER_ENABLE)) { + cqm_info(handle->dev_hdl, "Timer stop: hifc ppf timer stop\n"); + ret = hifc_ppf_tmr_stop(handle); + + if (ret != CQM_SUCCESS) { + cqm_info(handle->dev_hdl, "Timer stop: hifc ppf timer stop, ret=%d\n", + ret); + /* The timer fails to stop + * and does not affect resource release + */ + } + usleep_range(900, 1000); + } + + /* Release hardware doorbell */ + cqm_db_uninit(ex_handle); + + /* Cancel the callback of chipif */ + cqm_event_uninit(ex_handle); + + /* Release all table items + * and require the service to release all objects + */ + cqm_mem_uninit(ex_handle); + + /* Release cqm_handle */ + handle->cqm_hdl = NULL; + kfree(cqm_handle); +} + +/** + * cqm_mem_init - Initialize related memory of cqm, + * including all levels of entries + * @ex_handle: handle of hwdev + */ +s32 cqm_mem_init(void *ex_handle) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + struct cqm_handle_s *cqm_handle = NULL; + + cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl); + + if (cqm_bat_init(cqm_handle) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_bat_init)); + return CQM_FAIL; + } + + if (cqm_cla_init(cqm_handle) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_init)); + goto err1; + } + + if (cqm_bitmap_init(cqm_handle) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_bitmap_init)); + goto err2; + } + + if (cqm_object_table_init(cqm_handle) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_object_table_init)); + goto err3; + } + + return CQM_SUCCESS; + +err3: + cqm_bitmap_uninit(cqm_handle); +err2: + cqm_cla_uninit(cqm_handle); +err1: + cqm_bat_uninit(cqm_handle); + return CQM_FAIL; +} + +/** + * cqm_mem_uninit - Deinitialize related memory of cqm, + * including all levels of entries + * @ex_handle: handle of hwdev + */ +void cqm_mem_uninit(void *ex_handle) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + struct cqm_handle_s *cqm_handle = NULL; + + cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl); + + cqm_object_table_uninit(cqm_handle); + cqm_bitmap_uninit(cqm_handle); + cqm_cla_uninit(cqm_handle); + cqm_bat_uninit(cqm_handle); +} + +/** + * cqm_event_init - Initialize the event callback of cqm + * @ex_handle: handle of hwdev + */ +s32 cqm_event_init(void *ex_handle) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + + /* Register ceq and aeq callbacks with chipif */ + if (hifc_aeq_register_swe_cb(ex_handle, + HIFC_STATEFULL_EVENT, + 
cqm_aeq_callback) != CHIPIF_SUCCESS) { + cqm_err(handle->dev_hdl, "Event: fail to register aeq callback\n"); + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +/** + * cqm_event_uninit - Deinitialize the event callback of cqm + * @ex_handle: handle of hwdev + */ +void cqm_event_uninit(void *ex_handle) +{ + (void)hifc_aeq_unregister_swe_cb(ex_handle, HIFC_STATEFULL_EVENT); +} + +/** + * cqm_db_addr_alloc - Apply for a page of hardware doorbell and dwqe, + * with the same index, all obtained are physical addresses + * each function has up to 1K + * @ex_handle: handle of hwdev + * @db_addr: the address of doorbell + * @dwqe_addr: the address of dwqe + */ +s32 cqm_db_addr_alloc(void *ex_handle, void __iomem **db_addr, + void __iomem **dwqe_addr) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + + CQM_PTR_CHECK_RET(ex_handle, return CQM_FAIL, CQM_PTR_NULL(ex_handle)); + CQM_PTR_CHECK_RET(db_addr, return CQM_FAIL, CQM_PTR_NULL(db_addr)); + CQM_PTR_CHECK_RET(dwqe_addr, return CQM_FAIL, CQM_PTR_NULL(dwqe_addr)); + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_db_addr_alloc_cnt); + + return hifc_alloc_db_addr(ex_handle, db_addr, dwqe_addr); +} + +/** + * cqm_db_init - Initialize doorbell of cqm + * @ex_handle: handle of hwdev + */ +s32 cqm_db_init(void *ex_handle) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + struct cqm_handle_s *cqm_handle = NULL; + struct cqm_service_s *service = NULL; + + cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl); + + /* Assign hardware doorbell for service */ + service = &cqm_handle->service; + + if (cqm_db_addr_alloc(ex_handle, + &service->hardware_db_vaddr, + &service->dwqe_vaddr) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_db_addr_alloc)); + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +/** + * cqm_db_addr_free - Release a page of hardware doorbell and dwqe + * @ex_handle: handle of hwdev + * @db_addr: the address of doorbell + * @dwqe_addr: the address of dwqe + */ +void cqm_db_addr_free(void *ex_handle, void __iomem *db_addr, + void __iomem *dwqe_addr) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + + CQM_PTR_CHECK_NO_RET(ex_handle, CQM_PTR_NULL(ex_handle), return); + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_db_addr_free_cnt); + + hifc_free_db_addr(ex_handle, db_addr, dwqe_addr); +} + +/** + * cqm_db_uninit - Deinitialize doorbell of cqm + * @ex_handle: handle of hwdev + */ +void cqm_db_uninit(void *ex_handle) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + struct cqm_handle_s *cqm_handle = NULL; + struct cqm_service_s *service = NULL; + + cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl); + + /* Release hardware doorbell */ + service = &cqm_handle->service; + + cqm_db_addr_free(ex_handle, service->hardware_db_vaddr, + service->dwqe_vaddr); +} + +/** + * cqm_aeq_callback - cqm module callback processing of aeq + * @ex_handle: handle of hwdev + * @event: the input type of event + * @data: the input data + */ +u8 cqm_aeq_callback(void *ex_handle, u8 event, u64 data) +{ +#define CQM_AEQ_BASE_T_FC 48 +#define CQM_AEQ_BASE_T_FCOE 56 + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + struct cqm_handle_s *cqm_handle = NULL; + struct cqm_service_s *service = NULL; + struct service_register_template_s *service_template = NULL; + u8 event_level = FAULT_LEVEL_MAX; + + CQM_PTR_CHECK_RET(ex_handle, return event_level, + CQM_PTR_NULL(ex_handle)); + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_aeq_callback_cnt[event]); + + cqm_handle = (struct 
cqm_handle_s *)(handle->cqm_hdl); + CQM_PTR_CHECK_RET(cqm_handle, return event_level, + CQM_PTR_NULL(cqm_handle)); + + if (event >= (u8)CQM_AEQ_BASE_T_FC && + (event < (u8)CQM_AEQ_BASE_T_FCOE)) { + service = &cqm_handle->service; + service_template = &service->service_template; + + if (!service_template->aeq_callback) { + cqm_err(handle->dev_hdl, "Event: service aeq_callback unregistered\n"); + } else { + service_template->aeq_callback( + service_template->service_handle, event, data); + } + + return event_level; + } + + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(event)); + return CQM_FAIL; +} + +/** + * cqm_service_register - Service driver registers callback template with cqm + * @ex_handle: handle of hwdev + * @service_template: the template of service registration + */ +s32 cqm_service_register(void *ex_handle, + struct service_register_template_s *service_template) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + struct cqm_handle_s *cqm_handle = NULL; + struct cqm_service_s *service = NULL; + + CQM_PTR_CHECK_RET(ex_handle, return CQM_FAIL, CQM_PTR_NULL(ex_handle)); + + cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl); + CQM_PTR_CHECK_RET(cqm_handle, return CQM_FAIL, + CQM_PTR_NULL(cqm_handle)); + CQM_PTR_CHECK_RET(service_template, return CQM_FAIL, + CQM_PTR_NULL(service_template)); + + service = &cqm_handle->service; + + if (service->has_register == true) { + cqm_err(handle->dev_hdl, "Service register: service has registered\n"); + return CQM_FAIL; + } + + service->has_register = true; + (void)memcpy((void *)(&service->service_template), + (void *)service_template, + sizeof(struct service_register_template_s)); + + return CQM_SUCCESS; +} + +/** + * cqm_service_unregister - Service-driven cancellation to CQM + * @ex_handle: handle of hwdev + * @service_type: the type of service module + */ +void cqm_service_unregister(void *ex_handle) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + struct cqm_handle_s *cqm_handle = NULL; + struct cqm_service_s *service = NULL; + + CQM_PTR_CHECK_NO_RET(ex_handle, CQM_PTR_NULL(ex_handle), return); + + cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl); + CQM_PTR_CHECK_NO_RET(cqm_handle, CQM_PTR_NULL(cqm_handle), return); + + service = &cqm_handle->service; + + service->has_register = false; + memset(&service->service_template, 0, + sizeof(struct service_register_template_s)); +} + +/** + * cqm_cmd_alloc - Apply for a cmd buffer, the buffer size is fixed at 2K, + * the buffer content is not cleared, but the service needs to be cleared + * @ex_handle: handle of hwdev + */ +struct cqm_cmd_buf_s *cqm_cmd_alloc(void *ex_handle) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + + CQM_PTR_CHECK_RET(ex_handle, return NULL, CQM_PTR_NULL(ex_handle)); + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_cmd_alloc_cnt); + + return (struct cqm_cmd_buf_s *)hifc_alloc_cmd_buf(ex_handle); +} + +/** + * cqm_cmd_free - Free a cmd buffer + * @ex_handle: handle of hwdev + * @cmd_buf: the cmd buffer which needs freeing memory for + */ +void cqm_cmd_free(void *ex_handle, struct cqm_cmd_buf_s *cmd_buf) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + + CQM_PTR_CHECK_NO_RET(ex_handle, CQM_PTR_NULL(ex_handle), return); + CQM_PTR_CHECK_NO_RET(cmd_buf, CQM_PTR_NULL(cmd_buf), return); + CQM_PTR_CHECK_NO_RET(cmd_buf->buf, CQM_PTR_NULL(buf), return); + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_cmd_free_cnt); + + hifc_free_cmd_buf(ex_handle, (struct hifc_cmd_buf *)cmd_buf); +} + +/** + * cqm_send_cmd_box - 
Send a cmd in box mode, + * the interface will hang the completed amount, causing sleep + * @ex_handle: handle of hwdev + * @ack_type: the type of ack + * @mod: the mode of cqm send + * @cmd: the input cmd + * @buf_in: the input buffer of cqm_cmd + * @buf_out: the output buffer of cqm_cmd + * @timeout: exceeding the time limit will cause sleep + */ +s32 cqm_send_cmd_box(void *ex_handle, u8 ack_type, u8 mod, u8 cmd, + struct cqm_cmd_buf_s *buf_in, + struct cqm_cmd_buf_s *buf_out, u32 timeout) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + + CQM_PTR_CHECK_RET(ex_handle, return CQM_FAIL, CQM_PTR_NULL(ex_handle)); + CQM_PTR_CHECK_RET(buf_in, return CQM_FAIL, CQM_PTR_NULL(buf_in)); + CQM_PTR_CHECK_RET(buf_in->buf, return CQM_FAIL, CQM_PTR_NULL(buf)); + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_send_cmd_box_cnt); + + return hifc_cmdq_detail_resp(ex_handle, ack_type, mod, cmd, + (struct hifc_cmd_buf *)buf_in, + (struct hifc_cmd_buf *)buf_out, timeout); +} + +/** + * cqm_ring_hardware_db - Knock hardware doorbell + * @ex_handle: handle of hwdev + * @service_type: each kernel mode will be allocated a page of hardware doorbell + * @db_count: PI exceeding 64b in doorbell[7:0] + * @db: doorbell content, organized by the business, + * if there is a small-end conversion, the business needs to be completed + */ +s32 cqm_ring_hardware_db(void *ex_handle, u32 service_type, u8 db_count, u64 db) +{ + struct hifc_hwdev *handle; + struct cqm_handle_s *cqm_handle; + struct cqm_service_s *service; + + handle = (struct hifc_hwdev *)ex_handle; + cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl); + service = &cqm_handle->service; + + /* Write all before the doorbell */ + wmb(); + *((u64 *)service->hardware_db_vaddr + db_count) = db; + + return CQM_SUCCESS; +} diff --git a/drivers/scsi/huawei/hifc/hifc_cqm_main.h b/drivers/scsi/huawei/hifc/hifc_cqm_main.h new file mode 100644 index 000000000000..70b0c9ae0609 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_cqm_main.h @@ -0,0 +1,366 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __CQM_MAIN_H__ +#define __CQM_MAIN_H__ + +#define CHIPIF_SUCCESS 0 +#define CQM_TIMER_ENABLE 1 + +enum cqm_object_type_e { + CQM_OBJECT_ROOT_CTX = 0, + CQM_OBJECT_SERVICE_CTX, + CQM_OBJECT_NONRDMA_EMBEDDED_RQ = 10, + CQM_OBJECT_NONRDMA_EMBEDDED_SQ, + CQM_OBJECT_NONRDMA_SRQ, + CQM_OBJECT_NONRDMA_EMBEDDED_CQ, + CQM_OBJECT_NONRDMA_SCQ, +}; + +struct service_register_template_s { + u32 service_type; + u32 srq_ctx_size; /* srq,scq context_size config */ + u32 scq_ctx_size; + void *service_handle; /* ceq/aeq callback fun */ + + void (*aeq_callback)(void *service_handle, u8 event_type, u64 val); +}; + +struct cqm_service_s { + bool has_register; + void __iomem *hardware_db_vaddr; + void __iomem *dwqe_vaddr; + u32 buf_order; /* size of per buf 2^buf_order page */ + struct service_register_template_s service_template; +}; + +struct cqm_func_capability_s { + bool qpc_alloc_static; /* Allocate qpc memory dynamicly/statically */ + bool scqc_alloc_static; + u8 timer_enable; /* whether timer enable */ + + u32 flow_table_based_conn_number; + u32 flow_table_based_conn_cache_number; /* Maximum number in cache */ + u32 bloomfilter_length; /* Bloomfilter table size, aligned by 64B */ + /* The starting position of the bloomfilter table in the cache */ + u32 bloomfilter_addr; + u32 qpc_reserved; /* Reserved bits in bitmap */ + u32 mpt_reserved; /* There are also reserved bits in 
ROCE/IWARP mpt */ + /* All basic_size must be 2^n aligned */ + u32 hash_number; + /* Number of hash buckets, BAT table fill size is + * aligned with 64 buckets, at least 64 + */ + u32 hash_basic_size; + /* Hash bucket size is 64B, including 5 valid + * entries and 1 nxt_entry + */ + u32 qpc_number; + u32 qpc_basic_size; + + /* Note: for cqm specail test */ + u32 pagesize_reorder; + bool xid_alloc_mode; + bool gpa_check_enable; + u32 scq_reserved; + + u32 mpt_number; + u32 mpt_basic_size; + u32 scqc_number; + u32 scqc_basic_size; + u32 srqc_number; + u32 srqc_basic_size; + + u32 gid_number; + u32 gid_basic_size; + u32 lun_number; + u32 lun_basic_size; + u32 taskmap_number; + u32 taskmap_basic_size; + u32 l3i_number; + u32 l3i_basic_size; + u32 childc_number; + u32 childc_basic_size; + u32 child_qpc_id_start; /* Child ctx of FC is global addressing */ + /* The maximum number of child ctx in + * chip is 8096 + */ + u32 childc_number_all_function; + + u32 timer_number; + u32 timer_basic_size; + u32 xid2cid_number; + u32 xid2cid_basic_size; + u32 reorder_number; + u32 reorder_basic_size; +}; + +#define CQM_PF TYPE_PF +#define CQM_PPF TYPE_PPF +#define CQM_BAT_ENTRY_MAX (16) +#define CQM_BAT_ENTRY_SIZE (16) + +struct cqm_buf_list_s { + void *va; + dma_addr_t pa; + u32 refcount; +}; + +struct cqm_buf_s { + struct cqm_buf_list_s *buf_list; + struct cqm_buf_list_s direct; + u32 page_number; /* page_number=2^n buf_number */ + u32 buf_number; /* buf_list node count */ + u32 buf_size; /* buf_size=2^n PAGE_SIZE */ +}; + +struct cqm_bitmap_s { + ulong *table; + u32 max_num; + u32 last; + /* The index that cannot be allocated is reserved in the front */ + u32 reserved_top; + /* Lock for bitmap allocation */ + spinlock_t lock; +}; + +struct completion; +struct cqm_object_s { + u32 service_type; + u32 object_type; /* context,queue,mpt,mtt etc */ + u32 object_size; + /* for queue, ctx, MPT Byte */ + atomic_t refcount; + struct completion free; + void *cqm_handle; +}; + +struct cqm_object_table_s { + struct cqm_object_s **table; + u32 max_num; + rwlock_t lock; +}; + +struct cqm_cla_table_s { + u32 type; + u32 max_buffer_size; + u32 obj_num; + bool alloc_static; /* Whether the buffer is statically allocated */ + u32 cla_lvl; + /* The value of x calculated by the cacheline, used for chip */ + u32 cacheline_x; + /* The value of y calculated by the cacheline, used for chip */ + u32 cacheline_y; + /* The value of z calculated by the cacheline, used for chip */ + u32 cacheline_z; + /* The value of x calculated by the obj_size, used for software */ + u32 x; + /* The value of y calculated by the obj_size, used for software */ + u32 y; + /* The value of z calculated by the obj_size, used for software */ + u32 z; + struct cqm_buf_s cla_x_buf; + struct cqm_buf_s cla_y_buf; + struct cqm_buf_s cla_z_buf; + u32 trunk_order;/* A continuous physical page contains 2^order pages */ + u32 obj_size; + /* Lock for cla buffer allocation and free */ + struct mutex lock; + struct cqm_bitmap_s bitmap; + /* The association mapping table of index and object */ + struct cqm_object_table_s obj_table; +}; + +typedef void (*init_handler)(void *cqm_handle, + struct cqm_cla_table_s *cla_table, + void *cap); + +struct cqm_cla_entry_init_s { + u32 type; + init_handler cqm_cla_init_handler; +}; + +struct cqm_bat_table_s { + u32 bat_entry_type[CQM_BAT_ENTRY_MAX]; + u8 bat[CQM_BAT_ENTRY_MAX * CQM_BAT_ENTRY_SIZE]; + struct cqm_cla_table_s entry[CQM_BAT_ENTRY_MAX]; + u32 bat_size; +}; + +struct cqm_handle_s { + struct hifc_hwdev *ex_handle; + 
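+	/* PCI device backing this function; taken from hwdev->pcidev_hdl in
+	 * cqm_init() and used for the CQM buffer DMA mappings.
+	 */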
struct pci_dev *dev; + struct hifc_func_attr func_attribute; /* vf or pf */ + struct cqm_func_capability_s func_capability; + struct cqm_service_s service; + struct cqm_bat_table_s bat_table; + + struct list_head node; +}; + +struct cqm_cmd_buf_s { + void *buf; + dma_addr_t dma; + u16 size; +}; + +struct cqm_queue_header_s { + u64 doorbell_record; + u64 ci_record; + u64 rsv1; /* the share area bettween driver and ucode */ + u64 rsv2; /* the share area bettween driver and ucode*/ +}; + +struct cqm_queue_s { + struct cqm_object_s object; + u32 index; /* embedded queue QP has not index, SRQ and SCQ have */ + void *priv; /* service driver private info */ + u32 current_q_doorbell; + u32 current_q_room; + /* nonrdma: only select q_room_buf_1 for q_room_buf */ + struct cqm_buf_s q_room_buf_1; + struct cqm_buf_s q_room_buf_2; + struct cqm_queue_header_s *q_header_vaddr; + dma_addr_t q_header_paddr; + u8 *q_ctx_vaddr; /* SRQ and SCQ ctx space */ + dma_addr_t q_ctx_paddr; + u32 valid_wqe_num; + /*add for srq*/ + u8 *tail_container; + u8 *head_container; + u8 queue_link_mode; /*link,ring */ +}; + +struct cqm_nonrdma_qinfo_s { + struct cqm_queue_s common; + u32 wqe_size; + /* The number of wqe contained in each buf (excluding link wqe), + * For srq, it is the number of wqe contained in 1 container + */ + u32 wqe_per_buf; + u32 q_ctx_size; + /* When different services use different sizes of ctx, a large ctx will + * occupy multiple consecutive indexes of the bitmap + */ + u32 index_count; + u32 container_size; +}; + +/* service context, QPC, mpt */ +struct cqm_qpc_mpt_s { + struct cqm_object_s object; + u32 xid; + dma_addr_t paddr; + void *priv; /* service driver private info */ + u8 *vaddr; +}; + +struct cqm_qpc_mpt_info_s { + struct cqm_qpc_mpt_s common; + /* When different services use different sizes of QPC, large QPC/mpt + * will occupy multiple consecutive indexes of the bitmap + */ + u32 index_count; +}; + +#define CQM_ADDR_COMBINE(high_addr, low_addr) \ + ((((dma_addr_t)(high_addr)) << 32) + ((dma_addr_t)(low_addr))) +#define CQM_ADDR_HI(addr) ((u32)((u64)(addr) >> 32)) +#define CQM_ADDR_LW(addr) ((u32)((u64)(addr) & 0xffffffff)) +#define CQM_HASH_BUCKET_SIZE_64 (64) +#define CQM_LUN_SIZE_8 (8) +#define CQM_L3I_SIZE_8 (8) +#define CQM_TIMER_SIZE_32 (32) +#define CQM_LUN_FC_NUM (64) +#define CQM_TASKMAP_FC_NUM (4) +#define CQM_L3I_COMM_NUM (64) +#define CQM_TIMER_SCALE_NUM (2*1024) +#define CQM_TIMER_ALIGN_WHEEL_NUM (8) +#define CQM_TIMER_ALIGN_SCALE_NUM \ + (CQM_TIMER_SCALE_NUM*CQM_TIMER_ALIGN_WHEEL_NUM) +#define CQM_FC_PAGESIZE_ORDER (0) +#define CQM_QHEAD_ALIGN_ORDER (6) + +s32 cqm_mem_init(void *ex_handle); +void cqm_mem_uninit(void *ex_handle); +s32 cqm_event_init(void *ex_handle); +void cqm_event_uninit(void *ex_handle); +s32 cqm_db_init(void *ex_handle); +void cqm_db_uninit(void *ex_handle); +s32 cqm_init(void *ex_handle); +void cqm_uninit(void *ex_handle); +s32 cqm_service_register(void *ex_handle, + struct service_register_template_s *service_template); +void cqm_service_unregister(void *ex_handle); +s32 cqm_ring_hardware_db(void *ex_handle, + u32 service_type, + u8 db_count, u64 db); +s32 cqm_send_cmd_box(void *ex_handle, u8 ack_type, u8 mod, u8 cmd, + struct cqm_cmd_buf_s *buf_in, + struct cqm_cmd_buf_s *buf_out, + u32 timeout); +u8 cqm_aeq_callback(void *ex_handle, u8 event, u64 data); +void cqm_object_delete(struct cqm_object_s *object); +struct cqm_cmd_buf_s *cqm_cmd_alloc(void *ex_handle); +void cqm_cmd_free(void *ex_handle, struct cqm_cmd_buf_s *cmd_buf); +struct cqm_queue_s 
*cqm_object_fc_srq_create( + void *ex_handle, + enum cqm_object_type_e object_type, + u32 wqe_number, + u32 wqe_size, + void *object_priv); +struct cqm_qpc_mpt_s *cqm_object_qpc_mpt_create( + void *ex_handle, + enum cqm_object_type_e object_type, + u32 object_size, + void *object_priv, + u32 index); +struct cqm_queue_s *cqm_object_nonrdma_queue_create( + void *ex_handle, + enum cqm_object_type_e object_type, + u32 wqe_number, + u32 wqe_size, + void *object_priv); + +#define CQM_PTR_NULL(x) "%s: "#x" is null\n", __func__ +#define CQM_ALLOC_FAIL(x) "%s: "#x" alloc fail\n", __func__ +#define CQM_MAP_FAIL(x) "%s: "#x" map fail\n", __func__ +#define CQM_FUNCTION_FAIL(x) "%s: "#x" return failure\n", __func__ +#define CQM_WRONG_VALUE(x) "%s: "#x" %u is wrong\n", __func__, (u32)x + +#define cqm_err(dev, format, ...) \ + dev_err(dev, "[CQM]"format, ##__VA_ARGS__) +#define cqm_warn(dev, format, ...) \ + dev_warn(dev, "[CQM]"format, ##__VA_ARGS__) +#define cqm_notice(dev, format, ...) \ + dev_notice(dev, "[CQM]"format, ##__VA_ARGS__) +#define cqm_info(dev, format, ...) \ + dev_info(dev, "[CQM]"format, ##__VA_ARGS__) +#define cqm_dbg(format, ...) + +#define CQM_PTR_CHECK_RET(ptr, ret, desc) \ + do {\ + if (unlikely(NULL == (ptr))) {\ + pr_err("[CQM]"desc);\ + ret; \ + } \ + } while (0) + +#define CQM_PTR_CHECK_NO_RET(ptr, desc, ret) \ + do {\ + if (unlikely((ptr) == NULL)) {\ + pr_err("[CQM]"desc);\ + ret; \ + } \ + } while (0) +#define CQM_CHECK_EQUAL_RET(dev_hdl, actual, expect, ret, desc) \ + do {\ + if (unlikely((expect) != (actual))) {\ + cqm_err(dev_hdl, desc);\ + ret; \ + } \ + } while (0) + +#endif /* __CQM_MAIN_H__ */ diff --git a/drivers/scsi/huawei/hifc/hifc_cqm_object.c b/drivers/scsi/huawei/hifc/hifc_cqm_object.c new file mode 100644 index 000000000000..406b13f92e64 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_cqm_object.c @@ -0,0 +1,3599 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#include <linux/types.h> +#include <linux/sched.h> +#include <linux/pci.h> +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <linux/device.h> +#include <linux/gfp.h> +#include <linux/mm.h> + +#include "hifc_knl_adp.h" +#include "hifc_hw.h" +#include "hifc_hwdev.h" +#include "hifc_hwif.h" +#include "hifc_api_cmd.h" +#include "hifc_mgmt.h" +#include "hifc_cfg.h" +#include "hifc_cqm_object.h" +#include "hifc_cqm_main.h" +#define common_section + +#define CQM_MOD_CQM 8 +#define CQM_HARDWARE_DOORBELL 1 +/** + * cqm_swab64 - Convert a memory block to another endian by 8 byte basis + * @addr: start address of the memory block + * @cnt: the number of 8 byte basis in the memory block + */ +void cqm_swab64(u8 *addr, u32 cnt) +{ + u32 i = 0; + u64 *temp = (u64 *)addr; + u64 value = 0; + + for (i = 0; i < cnt; i++) { + value = __swab64(*temp); + *temp = value; + temp++; + } +} + +/** + * cqm_swab32 - Convert a memory block to another endian by 4 byte basis + * @addr: start address of the memory block + * @cnt: the number of 4 byte basis in the memory block + */ +void cqm_swab32(u8 *addr, u32 cnt) +{ + u32 i = 0; + u32 *temp = (u32 *)addr; + u32 value = 0; + + for (i = 0; i < cnt; i++) { + value = __swab32(*temp); + *temp = value; + temp++; + } +} + +/** + * cqm_shift - Find the base logarithm of two + * @data: the input data + */ +s32 cqm_shift(u32 data) +{ + s32 shift = -1; + + do { + data >>= 1; + shift++; + } while (data); + + return shift; +} + +/** + * cqm_check_align - Check whether the data is 
aligned as the base of 2^n + * @data: the input data + */ +bool cqm_check_align(u32 data) +{ + if (data == 0) + return false; + + do { + /* If data can be divided exactly by 2, + * it right shifts one bit + */ + if ((data & 0x1) == 0) { + data >>= 1; + } else { + /* If data can not be divided exactly by 2 + * it is not the base of 2^n,return false + */ + return false; + } + } while (data != 1); + + return true; +} + +/** + * cqm_kmalloc_align - Alloc memory whose start address is aligned as the basis + * of 2^n + * @size: the size of memory allocated + * @flags: the type of memory allocated + * @align_order: the basis for aligning + */ +static void *cqm_kmalloc_align(size_t size, gfp_t flags, u16 align_order) +{ + void *orig_addr = NULL; + void *align_addr = NULL; + void *index_addr = NULL; + + orig_addr = kmalloc(size + ((u64)1 << align_order) + sizeof(void *), + flags); + if (!orig_addr) + return NULL; + + index_addr = (void *)((char *)orig_addr + sizeof(void *)); + align_addr = (void *)((((u64)index_addr + + ((u64)1 << align_order) - 1) >> align_order) << align_order); + + /* Record the original memory address for memory release. */ + index_addr = (void *)((char *)align_addr - sizeof(void *)); + *(void **)index_addr = orig_addr; + + cqm_dbg("allocate %lu bytes aligned address: %p, original address: %p\n", + size, align_addr, orig_addr); + + return align_addr; +} + +/** + * cqm_kfree_align - Free memory whose start address is aligned as the basis of + * 2^n + * @addr: aligned address which would be free + */ +static void cqm_kfree_align(void *addr) +{ + void *index_addr = NULL; + + /* Release original memory address */ + index_addr = (void *)((char *)addr - sizeof(void *)); + + cqm_dbg("free aligned address: %p, original address: %p\n", + addr, *(void **)index_addr); + + kfree(*(void **)index_addr); +} + +/** + * cqm_buf_alloc_page - Alloc total pages memory for buffers + * @cqm_handle: handle of cqm + * @buf: the buffer which needs allocating memory for + */ +s32 cqm_buf_alloc_page(struct cqm_handle_s *cqm_handle, struct cqm_buf_s *buf) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + u32 order = 0; + void *va = NULL; + s32 i = 0; + + order = get_order(buf->buf_size); + + /*Here to allocate for every buffer's page for non-ovs*/ + for (i = 0; i < (s32)buf->buf_number; i++) { + va = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); + if (!va) { + cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(buf_page)); + break; + } + /* Pages should be initialized to 0 after applied + * especially related to the hash table + */ + memset(va, 0, buf->buf_size); + buf->buf_list[i].va = va; + } + + if (i != buf->buf_number) { + i--; + for (; i >= 0; i--) { + free_pages((ulong)(buf->buf_list[i].va), order); + buf->buf_list[i].va = NULL; + } + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +/** + * cqm_buf_alloc_map - Buffer pci mapping + * @cqm_handle: handle of cqm + * @buf: the buffer which needs map + */ +s32 cqm_buf_alloc_map(struct cqm_handle_s *cqm_handle, struct cqm_buf_s *buf) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct pci_dev *dev = cqm_handle->dev; + s32 i = 0; + void *va = NULL; + + for (i = 0; i < (s32)buf->buf_number; i++) { + va = buf->buf_list[i].va; + buf->buf_list[i].pa = + pci_map_single(dev, va, buf->buf_size, + PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(dev, buf->buf_list[i].pa)) { + cqm_err(handle->dev_hdl, CQM_MAP_FAIL(buf_list)); + break; + } + } + + if (i != buf->buf_number) { + i--; + for (; i >= 0; i--) { + pci_unmap_single(dev, 
buf->buf_list[i].pa, + buf->buf_size, PCI_DMA_BIDIRECTIONAL); + } + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +/** + * cqm_buf_alloc_direct - Buffer pci direct remapping + * @cqm_handle: handle of cqm + * @buf: the buffer which needs remap + */ +s32 cqm_buf_alloc_direct(struct cqm_handle_s *cqm_handle, + struct cqm_buf_s *buf, bool direct) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct page **pages = NULL; + u32 order = 0; + u32 i = 0; + u32 j = 0; + + order = get_order(buf->buf_size); + + if (direct == true) { + pages = (struct page **) + vmalloc(sizeof(struct page *) * buf->page_number); + if (!pages) { + cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(pages)); + return CQM_FAIL; + } + + for (i = 0; i < buf->buf_number; i++) { + for (j = 0; j < ((u32)1 << order); j++) { + pages[(i << order) + j] = (struct page *) + (void *)virt_to_page( + (u8 *)(buf->buf_list[i].va) + + (PAGE_SIZE * j)); + } + } + + /*lint -save -e648 + *Shield alarm for kernel functions' vmapping + */ + buf->direct.va = vmap(pages, buf->page_number, + VM_MAP, PAGE_KERNEL); + /*lint -restore*/ + vfree(pages); + if (!buf->direct.va) { + cqm_err(handle->dev_hdl, CQM_MAP_FAIL(buf->direct.va)); + return CQM_FAIL; + } + } else { + buf->direct.va = NULL; + } + + return CQM_SUCCESS; +} + +/** + * cqm_buf_alloc - Allocate for buffer and dma for the struct cqm_buf_s + * @cqm_handle: handle of cqm + * @buf: the buffer which needs allocating memory for and dma + */ +s32 cqm_buf_alloc(struct cqm_handle_s *cqm_handle, + struct cqm_buf_s *buf, bool direct) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct pci_dev *dev = cqm_handle->dev; + u32 order = 0; + s32 i = 0; + + order = get_order(buf->buf_size); + + /* Allocate for the descriptor space of buffer lists */ + buf->buf_list = (struct cqm_buf_list_s *) + vmalloc(buf->buf_number * + sizeof(struct cqm_buf_list_s)); + + CQM_PTR_CHECK_RET(buf->buf_list, return CQM_FAIL, + CQM_ALLOC_FAIL(buf_list)); + memset(buf->buf_list, 0, + buf->buf_number * sizeof(struct cqm_buf_list_s)); + + /* Allocate for every buffer's page */ + if (cqm_buf_alloc_page(cqm_handle, buf) == CQM_FAIL) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_buf_alloc_page)); + goto err1; + } + + /* Buffer pci remapping */ + if (cqm_buf_alloc_map(cqm_handle, buf) == CQM_FAIL) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_buf_alloc_map)); + goto err2; + } + + /* Buffer pci mapping */ + if (cqm_buf_alloc_direct(cqm_handle, buf, direct) == CQM_FAIL) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_buf_alloc_direct)); + goto err3; + } + + return CQM_SUCCESS; + +err3: + for (i = 0; i < (s32)buf->buf_number; i++) { + pci_unmap_single(dev, buf->buf_list[i].pa, buf->buf_size, + PCI_DMA_BIDIRECTIONAL); + } +err2: + for (i = 0; i < (s32)buf->buf_number; i++) { + free_pages((ulong)(buf->buf_list[i].va), order); + buf->buf_list[i].va = NULL; + } +err1: + vfree(buf->buf_list); + buf->buf_list = NULL; + return CQM_FAIL; +} + +/** + * cqm_cla_cache_invalid - Set the chip logical address cache invalid + * @cqm_handle: handle of cqm + * @gpa: global physical address + * @cache_size: chip cache size + */ +s32 cqm_cla_cache_invalid(struct cqm_handle_s *cqm_handle, dma_addr_t gpa, + u32 cache_size) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_cmd_buf_s *buf_in = NULL; + struct cqm_cla_cache_invalid_cmd_s *cmd = NULL; + s32 ret = CQM_FAIL; + + buf_in = cqm_cmd_alloc((void *)(cqm_handle->ex_handle)); + CQM_PTR_CHECK_RET(buf_in, return CQM_FAIL, + CQM_ALLOC_FAIL(buf_in)); + 
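+	/* Reuse the fixed 2K cmd buffer: only the small invalidate command
+	 * (cache_size plus gpa high/low) is filled in and pushed through the
+	 * cmdq below.
+	 */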
buf_in->size = sizeof(struct cqm_cla_cache_invalid_cmd_s); + + /* Fill command format, and turn into big endian */ + cmd = (struct cqm_cla_cache_invalid_cmd_s *)(buf_in->buf); + cmd->cache_size = cache_size; + cmd->gpa_h = CQM_ADDR_HI(gpa); + cmd->gpa_l = CQM_ADDR_LW(gpa); + + cqm_swab32((u8 *)cmd, + (sizeof(struct cqm_cla_cache_invalid_cmd_s) >> 2)); + + /* cmdq send a cmd */ + ret = cqm_send_cmd_box((void *)(cqm_handle->ex_handle), + CQM_CMD_ACK_TYPE_CMDQ, + CQM_MOD_CQM, CQM_CMD_T_CLA_CACHE_INVALID, + buf_in, NULL, CQM_CMD_TIMEOUT); + if (ret != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_send_cmd_box)); + cqm_err(handle->dev_hdl, "Cla cache invalid: cqm_send_cmd_box_ret=%d\n", + ret); + cqm_err(handle->dev_hdl, "Cla cache invalid: cla_cache_invalid_cmd: 0x%x 0x%x 0x%x\n", + cmd->gpa_h, cmd->gpa_l, cmd->cache_size); + } + + cqm_cmd_free((void *)(cqm_handle->ex_handle), buf_in); + return ret; +} + +/** + * cqm_buf_free - Free buffer space and dma for the struct cqm_buf_s + * @buf: the buffer which needs freeing memory for + * @dev: specific pci device + */ +void cqm_buf_free(struct cqm_buf_s *buf, struct pci_dev *dev) +{ + u32 order = 0; + s32 i = 0; + + order = get_order(buf->buf_size); + + if (buf->direct.va) { + vunmap(buf->direct.va); + buf->direct.va = NULL; + } + + if (buf->buf_list) { + for (i = 0; i < (s32)(buf->buf_number); i++) { + if (buf->buf_list[i].va) { + pci_unmap_single(dev, buf->buf_list[i].pa, + buf->buf_size, + PCI_DMA_BIDIRECTIONAL); + free_pages((ulong)(buf->buf_list[i].va), order); + buf->buf_list[i].va = NULL; + } + } + + vfree(buf->buf_list); + buf->buf_list = NULL; + } +} + +/** + * __free_cache_inv - Free cache and make buffer list invalid + * @cqm_handle: handle of cqm + * @buf: the buffer which needs freeing memory for + * @inv_flag: invalid or not + * @order:the basis for aligning + * @buf_idx:buffer index + */ +static void __free_cache_inv(struct cqm_handle_s *cqm_handle, + struct cqm_buf_s *buf, s32 *inv_flag, + u32 order, s32 buf_idx) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + + if (handle->chip_present_flag) { + *inv_flag = cqm_cla_cache_invalid(cqm_handle, + buf->buf_list[buf_idx].pa, + PAGE_SIZE << order); + if (*inv_flag != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, "Buffer free: fail to invalid buf_list pa cache, inv_flag=%d\n", + *inv_flag); + } + } + + pci_unmap_single(cqm_handle->dev, buf->buf_list[buf_idx].pa, + buf->buf_size, PCI_DMA_BIDIRECTIONAL); + + free_pages((unsigned long)(buf->buf_list[buf_idx].va), order); + + buf->buf_list[buf_idx].va = NULL; +} + +/** + * cqm_buf_free_cache_inv - Free cache and make buffer list invalid + * @cqm_handle: handle of cqm + * @buf: the buffer which needs freeing memory for + * @inv_flag: invalid or not + */ +void cqm_buf_free_cache_inv(struct cqm_handle_s *cqm_handle, + struct cqm_buf_s *buf, s32 *inv_flag) +{ + u32 order = 0; + s32 i = 0; + + order = get_order(buf->buf_size); + + if (buf->direct.va) { + vunmap(buf->direct.va); + buf->direct.va = NULL; + } + + if (buf->buf_list) { + for (i = 0; i < (s32)(buf->buf_number); i++) { + if (buf->buf_list[i].va) { + __free_cache_inv(cqm_handle, buf, + inv_flag, order, i); + } + } + + vfree(buf->buf_list); + buf->buf_list = NULL; + } +} + +#define bat_cla_section + +/** + * cqm_bat_update - Send cmds to the tile to update the BAT table through cmdq + * @cqm_handle: cqm handle + * Return: 0 - success, negative - failure + */ +s32 cqm_bat_update(struct cqm_handle_s *cqm_handle) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + 
struct cqm_cmd_buf_s *buf_in = NULL; + s32 ret = CQM_FAIL; + struct cqm_bat_update_cmd_s *bat_update_cmd = NULL; + + /* Allocate a cmd and fill */ + buf_in = cqm_cmd_alloc((void *)(cqm_handle->ex_handle)); + CQM_PTR_CHECK_RET(buf_in, return CQM_FAIL, CQM_ALLOC_FAIL(buf_in)); + buf_in->size = sizeof(struct cqm_bat_update_cmd_s); + + bat_update_cmd = (struct cqm_bat_update_cmd_s *)(buf_in->buf); + bat_update_cmd->byte_len = cqm_handle->bat_table.bat_size; + bat_update_cmd->offset = 0; + memcpy(bat_update_cmd->data, cqm_handle->bat_table.bat, + bat_update_cmd->byte_len); + + /*Big endian conversion*/ + cqm_swab32((u8 *)bat_update_cmd, + sizeof(struct cqm_bat_update_cmd_s) >> 2); + + /* send a cmd */ + ret = cqm_send_cmd_box((void *)(cqm_handle->ex_handle), + CQM_CMD_ACK_TYPE_CMDQ, CQM_MOD_CQM, + CQM_CMD_T_BAT_UPDATE, buf_in, + NULL, CQM_CMD_TIMEOUT); + if (ret != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_send_cmd_box)); + cqm_err(handle->dev_hdl, "Bat update: send_cmd_box ret=%d\n", + ret); + cqm_cmd_free((void *)(cqm_handle->ex_handle), buf_in); + return CQM_FAIL; + } + + /* Free a cmd */ + cqm_cmd_free((void *)(cqm_handle->ex_handle), buf_in); + + return CQM_SUCCESS; +} + +s32 cqm_bat_init_ft(struct cqm_handle_s *cqm_handle, + struct cqm_bat_table_s *bat_table, + enum func_type function_type) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + + if (function_type == CQM_PF || function_type == CQM_PPF) { + bat_table->bat_entry_type[0] = CQM_BAT_ENTRY_T_CFG; + bat_table->bat_entry_type[1] = CQM_BAT_ENTRY_T_HASH; + bat_table->bat_entry_type[2] = CQM_BAT_ENTRY_T_QPC; + bat_table->bat_entry_type[3] = CQM_BAT_ENTRY_T_SCQC; + bat_table->bat_entry_type[4] = CQM_BAT_ENTRY_T_LUN; + bat_table->bat_entry_type[5] = CQM_BAT_ENTRY_T_TASKMAP; + bat_table->bat_entry_type[6] = CQM_BAT_ENTRY_T_L3I; + bat_table->bat_entry_type[7] = CQM_BAT_ENTRY_T_CHILDC; + bat_table->bat_entry_type[8] = CQM_BAT_ENTRY_T_TIMER; + bat_table->bat_entry_type[9] = CQM_BAT_ENTRY_T_XID2CID; + bat_table->bat_entry_type[10] = CQM_BAT_ENTRY_T_REORDER; + bat_table->bat_size = CQM_BAT_SIZE_FT_PF; + } else { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(function_type)); + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +/** + * cqm_bat_init - Initialize the BAT table, only select the items to be + * initialized and arrange the entry order, the content of the BAT table entry + * needs to be filled after the CLA allocation + * @cqm_handle: cqm handle + * Return: 0 - success, negative - failure + */ +s32 cqm_bat_init(struct cqm_handle_s *cqm_handle) +{ + struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table; + u32 i = 0; + + memset(bat_table, 0, sizeof(struct cqm_bat_table_s)); + + /* Initialize the type of each bat entry */ + for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) + bat_table->bat_entry_type[i] = CQM_BAT_ENTRY_T_INVALID; + + if (cqm_bat_init_ft(cqm_handle, bat_table, + cqm_handle->func_attribute.func_type) == CQM_FAIL) { + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +/** + * cqm_bat_uninit - Deinitialize BAT table + * @cqm_handle: cqm handle + */ +void cqm_bat_uninit(struct cqm_handle_s *cqm_handle) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table; + u32 i = 0; + + for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) + bat_table->bat_entry_type[i] = CQM_BAT_ENTRY_T_INVALID; + + memset(bat_table->bat, 0, CQM_BAT_ENTRY_MAX * CQM_BAT_ENTRY_SIZE); + + /* Notify the chip to refresh the BAT table */ + if (cqm_bat_update(cqm_handle) != CQM_SUCCESS) + 
cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_bat_update)); +} + +static void cqm_bat_config_entry_size( + struct cqm_cla_table_s *cla_table, + struct cqm_bat_entry_standerd_s *bat_entry_standerd) +{ + /* Except for QPC of 256/512/1024, the others are all cacheline 256B, + * and the conversion will be done inside the chip + */ + if (cla_table->obj_size > CQM_CHIP_CACHELINE) { + if (cla_table->obj_size == 512) { + bat_entry_standerd->entry_size = CQM_BAT_ENTRY_SIZE_512; + } else { + bat_entry_standerd->entry_size = + CQM_BAT_ENTRY_SIZE_1024; + } + bat_entry_standerd->max_number = + cla_table->max_buffer_size / cla_table->obj_size; + } else { + bat_entry_standerd->entry_size = CQM_BAT_ENTRY_SIZE_256; + bat_entry_standerd->max_number = + cla_table->max_buffer_size / CQM_CHIP_CACHELINE; + } +} + +void cqm_bat_fill_cla_std_entry(struct cqm_handle_s *cqm_handle, + struct cqm_cla_table_s *cla_table, + u8 *entry_base_addr, u32 entry_type, + u8 gpa_check_enable) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_bat_entry_standerd_s *bat_entry_standerd = NULL; + dma_addr_t pa = 0; + + if (cla_table->obj_num == 0) { + cqm_info(handle->dev_hdl, "Cla alloc: cla_type %u, obj_num=0, don't init bat entry\n", + cla_table->type); + return; + } + + bat_entry_standerd = (struct cqm_bat_entry_standerd_s *)entry_base_addr; + cqm_bat_config_entry_size(cla_table, bat_entry_standerd); + bat_entry_standerd->max_number = bat_entry_standerd->max_number - 1; + + bat_entry_standerd->bypass = CQM_BAT_NO_BYPASS_CACHE; + bat_entry_standerd->z = cla_table->cacheline_z; + bat_entry_standerd->y = cla_table->cacheline_y; + bat_entry_standerd->x = cla_table->cacheline_x; + bat_entry_standerd->cla_level = cla_table->cla_lvl; + + if (cla_table->cla_lvl == CQM_CLA_LVL_0) + pa = cla_table->cla_z_buf.buf_list[0].pa; + else if (cla_table->cla_lvl == CQM_CLA_LVL_1) + pa = cla_table->cla_y_buf.buf_list[0].pa; + else + pa = cla_table->cla_x_buf.buf_list[0].pa; + + bat_entry_standerd->cla_gpa_h = CQM_ADDR_HI(pa); + if (entry_type == CQM_BAT_ENTRY_T_REORDER) { + /* Reorder does not support GPA validity check */ + bat_entry_standerd->cla_gpa_l = CQM_ADDR_LW(pa); + } else { + /* GPA is valid when gpa[0]=1 */ + bat_entry_standerd->cla_gpa_l = + CQM_ADDR_LW(pa) | gpa_check_enable; + } +} + +static void cqm_bat_fill_cla_cfg(struct cqm_handle_s *cqm_handle, + u8 *entry_base_addr) +{ + struct cqm_bat_entry_cfg_s *bat_entry_cfg = + (struct cqm_bat_entry_cfg_s *)entry_base_addr; + + bat_entry_cfg->cur_conn_cache = 0; + bat_entry_cfg->max_conn_cache = + cqm_handle->func_capability.flow_table_based_conn_cache_number; + bat_entry_cfg->cur_conn_num_h_4 = 0; + bat_entry_cfg->cur_conn_num_l_16 = 0; + bat_entry_cfg->max_conn_num = + cqm_handle->func_capability.flow_table_based_conn_number; + /* Align by 64 buckets, shift right 6 bits */ + if ((cqm_handle->func_capability.hash_number >> 6) != 0) { + /* After shift right 6 bits, the value should - 1 for the hash + * value + */ + bat_entry_cfg->bucket_num = + ((cqm_handle->func_capability.hash_number >> 6) - 1); + } + if (cqm_handle->func_capability.bloomfilter_length != 0) { + bat_entry_cfg->bloom_filter_len = + cqm_handle->func_capability.bloomfilter_length - 1; + bat_entry_cfg->bloom_filter_addr = + cqm_handle->func_capability.bloomfilter_addr; + } +} + +static void cqm_bat_fill_cla_taskmap(struct cqm_handle_s *cqm_handle, + struct cqm_cla_table_s *cla_table, + u8 *entry_base_addr) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_bat_entry_taskmap_s 
*bat_entry_taskmap = + (struct cqm_bat_entry_taskmap_s *)entry_base_addr; + if (cqm_handle->func_capability.taskmap_number != 0) { + bat_entry_taskmap->gpa0_h = + (u32)(cla_table->cla_z_buf.buf_list[0].pa >> 32); + bat_entry_taskmap->gpa0_l = + (u32)(cla_table->cla_z_buf.buf_list[0].pa & 0xffffffff); + + bat_entry_taskmap->gpa1_h = + (u32)(cla_table->cla_z_buf.buf_list[1].pa >> 32); + bat_entry_taskmap->gpa1_l = + (u32)(cla_table->cla_z_buf.buf_list[1].pa & 0xffffffff); + + bat_entry_taskmap->gpa2_h = + (u32)(cla_table->cla_z_buf.buf_list[2].pa >> 32); + bat_entry_taskmap->gpa2_l = + (u32)(cla_table->cla_z_buf.buf_list[2].pa & 0xffffffff); + + bat_entry_taskmap->gpa3_h = + (u32)(cla_table->cla_z_buf.buf_list[3].pa >> 32); + bat_entry_taskmap->gpa3_l = + (u32)(cla_table->cla_z_buf.buf_list[3].pa & 0xffffffff); + + cqm_info(handle->dev_hdl, "Cla alloc: taskmap bat entry: 0x%x 0x%x, 0x%x 0x%x, 0x%x 0x%x, 0x%x 0x%x\n", + bat_entry_taskmap->gpa0_h, bat_entry_taskmap->gpa0_l, + bat_entry_taskmap->gpa1_h, bat_entry_taskmap->gpa1_l, + bat_entry_taskmap->gpa2_h, bat_entry_taskmap->gpa2_l, + bat_entry_taskmap->gpa3_h, bat_entry_taskmap->gpa3_l); + } +} + +/** + * cqm_bat_fill_cla - Fill the base address of the cla table into the bat table + * @cqm_handle: cqm handle + */ +void cqm_bat_fill_cla(struct cqm_handle_s *cqm_handle) +{ + struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table; + struct cqm_cla_table_s *cla_table = NULL; + u32 entry_type = CQM_BAT_ENTRY_T_INVALID; + u8 *entry_base_addr = NULL; + u32 i = 0; + u8 gpa_check_enable = cqm_handle->func_capability.gpa_check_enable; + + /* Fill each item according to the arranged BAT table */ + entry_base_addr = bat_table->bat; + for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { + entry_type = bat_table->bat_entry_type[i]; + if (entry_type == CQM_BAT_ENTRY_T_CFG) { + cqm_bat_fill_cla_cfg(cqm_handle, entry_base_addr); + entry_base_addr += sizeof(struct cqm_bat_entry_cfg_s); + } else if (entry_type == CQM_BAT_ENTRY_T_TASKMAP) { + cqm_bat_fill_cla_taskmap(cqm_handle, + &bat_table->entry[i], + entry_base_addr); + entry_base_addr += + sizeof(struct cqm_bat_entry_taskmap_s); + } else if ((entry_type == CQM_BAT_ENTRY_T_INVALID) || + ((entry_type == CQM_BAT_ENTRY_T_TIMER) && + (cqm_handle->func_attribute.func_type != CQM_PPF))) { + /* When entry_type is invalid, or the timer entry under + * PF does not need to apply for memory and bat filling + */ + entry_base_addr += CQM_BAT_ENTRY_SIZE; + } else { + cla_table = &bat_table->entry[i]; + cqm_bat_fill_cla_std_entry(cqm_handle, cla_table, + entry_base_addr, entry_type, + gpa_check_enable); + entry_base_addr += + sizeof(struct cqm_bat_entry_standerd_s); + } + /* Checks if entry_base_addr is out of bounds */ + if (entry_base_addr >= + (bat_table->bat + CQM_BAT_ENTRY_MAX * CQM_BAT_ENTRY_SIZE)) + break; + } +} + +static void cqm_cla_xyz_cacheline_lvl1(struct cqm_cla_table_s *cla_table, + u32 trunk_size) +{ + s32 shift = 0; + + if (cla_table->obj_size >= CQM_CHIP_CACHELINE) { + cla_table->cacheline_z = cla_table->z; + cla_table->cacheline_y = cla_table->y; + cla_table->cacheline_x = cla_table->x; + } else { + shift = cqm_shift(trunk_size / CQM_CHIP_CACHELINE); + cla_table->cacheline_z = shift ? 
(shift - 1) : (shift); + cla_table->cacheline_y = CQM_MAX_INDEX_BIT; + cla_table->cacheline_x = 0; + } +} + +s32 cqm_cla_xyz_lvl1(struct cqm_handle_s *cqm_handle, + struct cqm_cla_table_s *cla_table, + u32 trunk_size) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_buf_s *cla_y_buf = NULL; + struct cqm_buf_s *cla_z_buf = NULL; + dma_addr_t *base = NULL; + s32 shift = 0; + u32 i = 0; + s32 ret = CQM_FAIL; + u8 gpa_check_enable = cqm_handle->func_capability.gpa_check_enable; + + if (cla_table->type == CQM_BAT_ENTRY_T_REORDER) + gpa_check_enable = 0; + + cla_table->cla_lvl = CQM_CLA_LVL_1; + + shift = cqm_shift(trunk_size / cla_table->obj_size); + cla_table->z = shift ? (shift - 1) : (shift); + cla_table->y = CQM_MAX_INDEX_BIT; + cla_table->x = 0; + cqm_cla_xyz_cacheline_lvl1(cla_table, trunk_size); + + /* Allocate y buf space */ + cla_y_buf = &cla_table->cla_y_buf; + cla_y_buf->buf_size = trunk_size; + cla_y_buf->buf_number = 1; + cla_y_buf->page_number = cla_y_buf->buf_number << + cla_table->trunk_order; + ret = cqm_buf_alloc(cqm_handle, cla_y_buf, false); + + CQM_CHECK_EQUAL_RET(handle->dev_hdl, ret, CQM_SUCCESS, return CQM_FAIL, + CQM_ALLOC_FAIL(lvl_1_y_buf)); + + /* Allocate z buf space */ + cla_z_buf = &cla_table->cla_z_buf; + cla_z_buf->buf_size = trunk_size; + cla_z_buf->buf_number = ALIGN(cla_table->max_buffer_size, trunk_size) / + trunk_size; + cla_z_buf->page_number = cla_z_buf->buf_number << + cla_table->trunk_order; + /* Requires static allocation of all buffer space */ + if (cla_table->alloc_static == true) { + if (cqm_buf_alloc(cqm_handle, cla_z_buf, false) == CQM_FAIL) { + cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(lvl_1_z_buf)); + cqm_buf_free(cla_y_buf, cqm_handle->dev); + return CQM_FAIL; + } + + /* Fill gpa of z buf list into y buf */ + base = (dma_addr_t *)(cla_y_buf->buf_list->va); + for (i = 0; i < cla_z_buf->buf_number; i++) { + /*gpa[0]=1 means this GPA is valid*/ + *base = (cla_z_buf->buf_list[i].pa | gpa_check_enable); + base++; + } + + /* big-endian conversion */ + cqm_swab64((u8 *)(cla_y_buf->buf_list->va), + cla_z_buf->buf_number); + } else { + /* Only initialize and allocate buf list space, buffer spaces + * are dynamically allocated in service + */ + cla_z_buf->buf_list = (struct cqm_buf_list_s *) + vmalloc(cla_z_buf->buf_number * + sizeof(struct cqm_buf_list_s)); + + if (!cla_z_buf->buf_list) { + cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(lvl_1_z_buf)); + cqm_buf_free(cla_y_buf, cqm_handle->dev); + return CQM_FAIL; + } + memset(cla_z_buf->buf_list, 0, + cla_z_buf->buf_number * sizeof(struct cqm_buf_list_s)); + } + + return CQM_SUCCESS; +} + +static s32 cqm_cla_yz_lvl2_static(struct cqm_handle_s *cqm_handle, + struct cqm_buf_s *cla_y_buf, + struct cqm_buf_s *cla_z_buf, + u8 gpa_check_enable) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + dma_addr_t *base = NULL; + u32 i = 0; + + if (cqm_buf_alloc(cqm_handle, cla_z_buf, false) == CQM_FAIL) { + cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(lvl_2_z_buf)); + return CQM_FAIL; + } + + /* The virtual address of y buf is remapped for software access */ + if (cqm_buf_alloc(cqm_handle, cla_y_buf, true) == CQM_FAIL) { + cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(lvl_2_y_buf)); + cqm_buf_free(cla_z_buf, cqm_handle->dev); + return CQM_FAIL; + } + + /* Fill gpa of z buf list into y buf */ + base = (dma_addr_t *)(cla_y_buf->direct.va); + for (i = 0; i < cla_z_buf->buf_number; i++) { + /*gpa[0]=1 means this GPA is valid*/ + *base = (cla_z_buf->buf_list[i].pa | gpa_check_enable); + base++; + } + + /* big-endian 
conversion */ + cqm_swab64((u8 *)(cla_y_buf->direct.va), cla_z_buf->buf_number); + + return CQM_SUCCESS; +} + +static void cqm_cla_yz_lvl2_init_cacheline(struct cqm_cla_table_s *cla_table, + u32 trunk_size) +{ + s32 shift = 0; + + if (cla_table->obj_size >= CQM_CHIP_CACHELINE) { + cla_table->cacheline_z = cla_table->z; + cla_table->cacheline_y = cla_table->y; + cla_table->cacheline_x = cla_table->x; + } else { + shift = cqm_shift(trunk_size / CQM_CHIP_CACHELINE); + cla_table->cacheline_z = shift ? (shift - 1) : (shift); + shift = cqm_shift(trunk_size / sizeof(dma_addr_t)); + cla_table->cacheline_y = cla_table->cacheline_z + shift; + cla_table->cacheline_x = CQM_MAX_INDEX_BIT; + } +} + +s32 cqm_cla_xyz_lvl2(struct cqm_handle_s *cqm_handle, + struct cqm_cla_table_s *cla_table, + u32 trunk_size) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_buf_s *cla_x_buf = NULL; + struct cqm_buf_s *cla_y_buf = NULL; + struct cqm_buf_s *cla_z_buf = NULL; + dma_addr_t *base = NULL; + s32 shift = 0; + u32 i = 0; + s32 ret = CQM_FAIL; + u8 gpa_check_enable = cqm_handle->func_capability.gpa_check_enable; + + if (cla_table->type == CQM_BAT_ENTRY_T_REORDER) + gpa_check_enable = 0; + + cla_table->cla_lvl = CQM_CLA_LVL_2; + + shift = cqm_shift(trunk_size / cla_table->obj_size); + cla_table->z = shift ? (shift - 1) : (shift); + shift = cqm_shift(trunk_size / sizeof(dma_addr_t)); + cla_table->y = cla_table->z + shift; + cla_table->x = CQM_MAX_INDEX_BIT; + + cqm_cla_yz_lvl2_init_cacheline(cla_table, trunk_size); + + /* Allocate x buf space */ + cla_x_buf = &cla_table->cla_x_buf; + cla_x_buf->buf_size = trunk_size; + cla_x_buf->buf_number = 1; + cla_x_buf->page_number = cla_x_buf->buf_number << + cla_table->trunk_order; + + ret = cqm_buf_alloc(cqm_handle, cla_x_buf, false); + CQM_CHECK_EQUAL_RET(handle->dev_hdl, ret, CQM_SUCCESS, return CQM_FAIL, + CQM_ALLOC_FAIL(lvl_2_x_buf)); + + /* Allocate y buf and z buf space */ + cla_z_buf = &cla_table->cla_z_buf; + cla_z_buf->buf_size = trunk_size; + cla_z_buf->buf_number = ALIGN(cla_table->max_buffer_size, trunk_size) / + trunk_size; + cla_z_buf->page_number = cla_z_buf->buf_number << + cla_table->trunk_order; + + cla_y_buf = &cla_table->cla_y_buf; + cla_y_buf->buf_size = trunk_size; + cla_y_buf->buf_number = + (ALIGN(cla_z_buf->buf_number * sizeof(dma_addr_t), + trunk_size)) / trunk_size; + + cla_y_buf->page_number = cla_y_buf->buf_number << + cla_table->trunk_order; + + /* Requires static allocation of all buffer space */ + if (cla_table->alloc_static == true) { + if (cqm_cla_yz_lvl2_static(cqm_handle, + cla_y_buf, + cla_z_buf, + gpa_check_enable) == CQM_FAIL) { + cqm_buf_free(cla_x_buf, cqm_handle->dev); + return CQM_FAIL; + } + /* Fill gpa of y buf list into x buf */ + base = (dma_addr_t *)(cla_x_buf->buf_list->va); + for (i = 0; i < cla_y_buf->buf_number; i++) { + /* gpa[0]=1 means this GPA is valid */ + *base = (cla_y_buf->buf_list[i].pa | gpa_check_enable); + base++; + } + + /* big-endian conversion */ + cqm_swab64((u8 *)(cla_x_buf->buf_list->va), + cla_y_buf->buf_number); + } else { + /* Only initialize and allocate buf list space, buffer spaces + * are allocated in service + */ + cla_z_buf->buf_list = (struct cqm_buf_list_s *) + vmalloc(cla_z_buf->buf_number * + sizeof(struct cqm_buf_list_s)); + if (!cla_z_buf->buf_list) { + cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(lvl_2_z_buf)); + cqm_buf_free(cla_x_buf, cqm_handle->dev); + return CQM_FAIL; + } + memset(cla_z_buf->buf_list, 0, + cla_z_buf->buf_number * sizeof(struct cqm_buf_list_s)); + + 
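+		/* Same for the y level: only the buf_list descriptor array is
+		 * allocated here, the y/z trunk pages are mapped on demand by
+		 * the service.
+		 */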
cla_y_buf->buf_list = (struct cqm_buf_list_s *) + vmalloc(cla_y_buf->buf_number * + sizeof(struct cqm_buf_list_s)); + + if (!cla_y_buf->buf_list) { + cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(lvl_2_y_buf)); + cqm_buf_free(cla_z_buf, cqm_handle->dev); + cqm_buf_free(cla_x_buf, cqm_handle->dev); + return CQM_FAIL; + } + memset(cla_y_buf->buf_list, 0, + cla_y_buf->buf_number * sizeof(struct cqm_buf_list_s)); + } + + return CQM_SUCCESS; +} + +static s32 cqm_cla_xyz_check(struct cqm_handle_s *cqm_handle, + struct cqm_cla_table_s *cla_table) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + + if (cla_table->obj_num == 0) { + /* If the capability is set to 0, the CLA does not need to be + * initialized and exits directly + */ + cqm_info(handle->dev_hdl, "Cla alloc: cla_type %u, obj_num=0, don't alloc buffer\n", + cla_table->type); + return CQM_SUCCESS; + } + + /* Check whether obj_size is aligned with 2^n, and error is reported in + * case of 0 and 1 + */ + if (cqm_check_align(cla_table->obj_size) == false) { + cqm_err(handle->dev_hdl, "Cla alloc: cla_type %u, obj_size 0x%x is not align on 2^n\n", + cla_table->type, cla_table->obj_size); + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +/** + * cqm_cla_xyz - Calculate how many levels of cla tables and allocate space + * for each level of cla tables + * @cqm_handle: cqm handle + * @cla_table: cla table + * Return: 0 - success, negative - failure + */ +s32 cqm_cla_xyz(struct cqm_handle_s *cqm_handle, + struct cqm_cla_table_s *cla_table) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_buf_s *cla_z_buf = NULL; + u32 trunk_size = 0; + s32 ret = CQM_FAIL; + + if (cqm_cla_xyz_check(cqm_handle, cla_table) == CQM_FAIL) + return CQM_FAIL; + + trunk_size = PAGE_SIZE << cla_table->trunk_order; + + if (trunk_size < cla_table->obj_size) { + cqm_err(handle->dev_hdl, "Cla alloc: cla type %u, obj_size 0x%x is out of trunk size\n", + cla_table->type, cla_table->obj_size); + return CQM_FAIL; + } + + /* Level 0 CLA: The buffer occupies little space, and can be assigned to + * cla_z_buf during initialization + */ + if (cla_table->max_buffer_size <= trunk_size) { + cla_table->cla_lvl = CQM_CLA_LVL_0; + + cla_table->z = CQM_MAX_INDEX_BIT; + cla_table->y = 0; + cla_table->x = 0; + + cla_table->cacheline_z = cla_table->z; + cla_table->cacheline_y = cla_table->y; + cla_table->cacheline_x = cla_table->x; + + /* Allocate z buf space */ + cla_z_buf = &cla_table->cla_z_buf; + cla_z_buf->buf_size = trunk_size; + cla_z_buf->buf_number = 1; + cla_z_buf->page_number = + cla_z_buf->buf_number << cla_table->trunk_order; + ret = cqm_buf_alloc(cqm_handle, cla_z_buf, false); + CQM_CHECK_EQUAL_RET( + handle->dev_hdl, ret, CQM_SUCCESS, + return CQM_FAIL, CQM_ALLOC_FAIL(lvl_0_z_buf)); + + } else if (cla_table->max_buffer_size <= + (trunk_size * (trunk_size / sizeof(dma_addr_t)))) { + /* Level 1 CLA: Cla_y_buf is allocated during initialization, + * and cla_z_buf can be allocated dynamically + */ + if (cqm_cla_xyz_lvl1(cqm_handle, + cla_table, trunk_size) == CQM_FAIL) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_cla_xyz_lvl1)); + return CQM_FAIL; + } + } else if (cla_table->max_buffer_size <= + (trunk_size * (trunk_size / sizeof(dma_addr_t)) * + (trunk_size / sizeof(dma_addr_t)))) { + /* Level 2 CLA: Cla_x_buf is allocated during initialization, + * and cla_y_buf and cla_z_buf can be dynamically allocated + */ + if (cqm_cla_xyz_lvl2(cqm_handle, cla_table, trunk_size) == + CQM_FAIL) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_cla_xyz_lvl2)); + 
return CQM_FAIL; + } + } else { + cqm_err(handle->dev_hdl, "Cla alloc: cla max_buffer_size 0x%x exceeds support range\n", + cla_table->max_buffer_size); + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +static void cqm_bat_entry_hash_init(void *cqm_handle, + struct cqm_cla_table_s *cla_table, + void *cap) +{ + struct cqm_func_capability_s *capability = + (struct cqm_func_capability_s *)cap; + + cla_table->trunk_order = capability->pagesize_reorder; + cla_table->max_buffer_size = capability->hash_number * + capability->hash_basic_size; + cla_table->obj_size = capability->hash_basic_size; + cla_table->obj_num = capability->hash_number; + cla_table->alloc_static = true; +} + +static void cqm_bat_entry_qpc_init(void *cqm_handle, + struct cqm_cla_table_s *cla_table, + void *cap) +{ + struct cqm_handle_s *handle = (struct cqm_handle_s *)cqm_handle; + struct hifc_hwdev *hwdev_handle = handle->ex_handle; + struct cqm_func_capability_s *capability = + (struct cqm_func_capability_s *)cap; + + cla_table->trunk_order = capability->pagesize_reorder; + cla_table->max_buffer_size = capability->qpc_number * + capability->qpc_basic_size; + cla_table->obj_size = capability->qpc_basic_size; + cla_table->obj_num = capability->qpc_number; + cla_table->alloc_static = capability->qpc_alloc_static; + cqm_info(hwdev_handle->dev_hdl, "Cla alloc: qpc alloc_static=%d\n", + cla_table->alloc_static); +} + +static void cqm_bat_entry_mpt_init(void *cqm_handle, + struct cqm_cla_table_s *cla_table, + void *cap) +{ + struct cqm_func_capability_s *capability = + (struct cqm_func_capability_s *)cap; + + cla_table->trunk_order = capability->pagesize_reorder; + cla_table->max_buffer_size = capability->mpt_number * + capability->mpt_basic_size; + cla_table->obj_size = capability->mpt_basic_size; + cla_table->obj_num = capability->mpt_number; + cla_table->alloc_static = true; +} + +static void cqm_bat_entry_scqc_init(void *cqm_handle, + struct cqm_cla_table_s *cla_table, + void *cap) +{ + struct cqm_handle_s *handle = (struct cqm_handle_s *)cqm_handle; + struct hifc_hwdev *hwdev_handle = handle->ex_handle; + struct cqm_func_capability_s *capability = + (struct cqm_func_capability_s *)cap; + + cla_table->trunk_order = capability->pagesize_reorder; + cla_table->max_buffer_size = capability->scqc_number * + capability->scqc_basic_size; + cla_table->obj_size = capability->scqc_basic_size; + cla_table->obj_num = capability->scqc_number; + cla_table->alloc_static = capability->scqc_alloc_static; + cqm_info(hwdev_handle->dev_hdl, "Cla alloc: scqc alloc_static=%d\n", + cla_table->alloc_static); +} + +static void cqm_bat_entry_srqc_init(void *cqm_handle, + struct cqm_cla_table_s *cla_table, + void *cap) +{ + struct cqm_func_capability_s *capability = + (struct cqm_func_capability_s *)cap; + + cla_table->trunk_order = capability->pagesize_reorder; + cla_table->max_buffer_size = capability->srqc_number * + capability->srqc_basic_size; + cla_table->obj_size = capability->srqc_basic_size; + cla_table->obj_num = capability->srqc_number; + cla_table->alloc_static = false; +} + +static void cqm_bat_entry_gid_init(void *cqm_handle, + struct cqm_cla_table_s *cla_table, + void *cap) +{ + struct cqm_func_capability_s *capability = + (struct cqm_func_capability_s *)cap; + + cla_table->max_buffer_size = capability->gid_number * + capability->gid_basic_size; + cla_table->trunk_order = (u32)cqm_shift( + ALIGN( + cla_table->max_buffer_size, + PAGE_SIZE) / PAGE_SIZE); + cla_table->obj_size = capability->gid_basic_size; + cla_table->obj_num = 
capability->gid_number; + cla_table->alloc_static = true; +} + +static void cqm_bat_entry_lun_init(void *cqm_handle, + struct cqm_cla_table_s *cla_table, + void *cap) +{ + struct cqm_func_capability_s *capability = + (struct cqm_func_capability_s *)cap; + + cla_table->trunk_order = CLA_TABLE_PAGE_ORDER; + cla_table->max_buffer_size = capability->lun_number * + capability->lun_basic_size; + cla_table->obj_size = capability->lun_basic_size; + cla_table->obj_num = capability->lun_number; + cla_table->alloc_static = true; +} + +static void cqm_bat_entry_taskmap_init(void *cqm_handle, + struct cqm_cla_table_s *cla_table, + void *cap) +{ + struct cqm_func_capability_s *capability = + (struct cqm_func_capability_s *)cap; + + cla_table->trunk_order = CQM_4K_PAGE_ORDER; + cla_table->max_buffer_size = capability->taskmap_number * + capability->taskmap_basic_size; + cla_table->obj_size = capability->taskmap_basic_size; + cla_table->obj_num = capability->taskmap_number; + cla_table->alloc_static = true; +} + +static void cqm_bat_entry_l3i_init(void *cqm_handle, + struct cqm_cla_table_s *cla_table, + void *cap) +{ + struct cqm_func_capability_s *capability = + (struct cqm_func_capability_s *)cap; + + cla_table->trunk_order = CLA_TABLE_PAGE_ORDER; + cla_table->max_buffer_size = capability->l3i_number * + capability->l3i_basic_size; + cla_table->obj_size = capability->l3i_basic_size; + cla_table->obj_num = capability->l3i_number; + cla_table->alloc_static = true; +} + +static void cqm_bat_entry_childc_init(void *cqm_handle, + struct cqm_cla_table_s *cla_table, + void *cap) +{ + struct cqm_func_capability_s *capability = + (struct cqm_func_capability_s *)cap; + + cla_table->trunk_order = capability->pagesize_reorder; + cla_table->max_buffer_size = capability->childc_number * + capability->childc_basic_size; + cla_table->obj_size = capability->childc_basic_size; + cla_table->obj_num = capability->childc_number; + cla_table->alloc_static = true; +} + +static void cqm_bat_entry_timer_init(void *cqm_handle, + struct cqm_cla_table_s *cla_table, + void *cap) +{ + struct cqm_func_capability_s *capability = + (struct cqm_func_capability_s *)cap; + + cla_table->trunk_order = CQM_4K_PAGE_ORDER; + cla_table->max_buffer_size = capability->timer_number * + capability->timer_basic_size; + cla_table->obj_size = capability->timer_basic_size; + cla_table->obj_num = capability->timer_number; + cla_table->alloc_static = true; +} + +static void cqm_bat_entry_xid2cid_init(void *cqm_handle, + struct cqm_cla_table_s *cla_table, + void *cap) +{ + struct cqm_func_capability_s *capability = + (struct cqm_func_capability_s *)cap; + + cla_table->trunk_order = capability->pagesize_reorder; + cla_table->max_buffer_size = capability->xid2cid_number * + capability->xid2cid_basic_size; + cla_table->obj_size = capability->xid2cid_basic_size; + cla_table->obj_num = capability->xid2cid_number; + cla_table->alloc_static = true; +} + +static void cqm_bat_entry_reoder_init(void *cqm_handle, + struct cqm_cla_table_s *cla_table, + void *cap) +{ + struct cqm_func_capability_s *capability = + (struct cqm_func_capability_s *)cap; + + cla_table->trunk_order = capability->pagesize_reorder; + cla_table->max_buffer_size = capability->reorder_number * + capability->reorder_basic_size; + cla_table->obj_size = capability->reorder_basic_size; + cla_table->obj_num = capability->reorder_number; + cla_table->alloc_static = true; +} + +struct cqm_cla_entry_init_s cqm_cla_entry_init_tbl[] = { + {CQM_BAT_ENTRY_T_HASH, cqm_bat_entry_hash_init}, + 
{CQM_BAT_ENTRY_T_QPC, cqm_bat_entry_qpc_init}, + {CQM_BAT_ENTRY_T_MPT, cqm_bat_entry_mpt_init}, + {CQM_BAT_ENTRY_T_SCQC, cqm_bat_entry_scqc_init}, + {CQM_BAT_ENTRY_T_SRQC, cqm_bat_entry_srqc_init}, + {CQM_BAT_ENTRY_T_GID, cqm_bat_entry_gid_init}, + {CQM_BAT_ENTRY_T_LUN, cqm_bat_entry_lun_init}, + {CQM_BAT_ENTRY_T_TASKMAP, cqm_bat_entry_taskmap_init}, + {CQM_BAT_ENTRY_T_L3I, cqm_bat_entry_l3i_init}, + {CQM_BAT_ENTRY_T_CHILDC, cqm_bat_entry_childc_init}, + {CQM_BAT_ENTRY_T_TIMER, cqm_bat_entry_timer_init}, + {CQM_BAT_ENTRY_T_XID2CID, cqm_bat_entry_xid2cid_init}, + {CQM_BAT_ENTRY_T_REORDER, cqm_bat_entry_reoder_init}, +}; + +static struct cqm_cla_entry_init_s *cqm_get_cla_init_entry( + struct cqm_handle_s *cqm_handle, + u32 type) +{ + int i; + struct cqm_cla_entry_init_s *entry = NULL; + + for (i = 0; + i < (sizeof(cqm_cla_entry_init_tbl) / + sizeof(struct cqm_cla_entry_init_s)); i++) { + entry = &cqm_cla_entry_init_tbl[i]; + if (entry->type == type) + return entry; + } + + return NULL; +} + +void cqm_cla_init_entry(struct cqm_handle_s *cqm_handle, + struct cqm_cla_table_s *cla_table, + struct cqm_func_capability_s *capability) +{ + struct cqm_cla_entry_init_s *entry; + + entry = cqm_get_cla_init_entry(cqm_handle, cla_table->type); + if (entry && entry->cqm_cla_init_handler) + entry->cqm_cla_init_handler(cqm_handle, cla_table, capability); +} + +static s32 cqm_cla_fill_entry(struct cqm_handle_s *cqm_handle) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + s32 ret = CQM_FAIL; + + /* After the allocation of CLA entry, fill in the BAT table */ + cqm_bat_fill_cla(cqm_handle); + + /* Notify the chip to refresh the BAT table */ + ret = cqm_bat_update(cqm_handle); + if (ret != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_bat_update)); + return CQM_FAIL; + } + + cqm_info(handle->dev_hdl, "Timer start: func_type=%d, timer_enable=%u\n", + cqm_handle->func_attribute.func_type, + cqm_handle->func_capability.timer_enable); + + if ((cqm_handle->func_attribute.func_type == CQM_PPF) && + (cqm_handle->func_capability.timer_enable == CQM_TIMER_ENABLE)) { + /* After the timer resource is allocated, + * the timer needs to be enabled + */ + cqm_info(handle->dev_hdl, "Timer start: hifc ppf timer start\n"); + ret = hifc_ppf_tmr_start((void *)(cqm_handle->ex_handle)); + if (ret != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, "Timer start: hifc ppf timer start, ret=%d\n", + ret); + return CQM_FAIL; + } + } + return CQM_SUCCESS; +} + +s32 cqm_cla_init(struct cqm_handle_s *cqm_handle) +{ + struct cqm_func_capability_s *capability = &cqm_handle->func_capability; + struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table; + struct cqm_cla_table_s *cla_table = NULL; + s32 inv_flag = 0; + u32 i = 0; + u32 j = 0; + + for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { + cla_table = &bat_table->entry[i]; + cla_table->type = bat_table->bat_entry_type[i]; + + cqm_cla_init_entry(cqm_handle, cla_table, capability); + + /* Allocate CLA entry space of all levels */ + if ((cla_table->type >= CQM_BAT_ENTRY_T_HASH) && + (cla_table->type <= CQM_BAT_ENTRY_T_REORDER)) { + /* Only needs to allocate timer resources for PPF, + * 8 wheels * 2k scales * 32B * func_num, for PF, there + * is no need to allocate resources for the timer, nor + * to fill in the structure of the timer entry in the + * BAT table. 
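+ * As a rough illustration only (assuming "2k scales" means 2048 entries): + * with func_num = 64 this works out to 8 * 2048 * 32 B * 64 = 32 MiB of + * timer context in total.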
+ */ + if (!((cla_table->type == CQM_BAT_ENTRY_T_TIMER) && + (cqm_handle->func_attribute.func_type + != CQM_PPF))) { + if (cqm_cla_xyz(cqm_handle, cla_table) == + CQM_FAIL) + goto err; + } + } + mutex_init(&cla_table->lock); + } + if (cqm_cla_fill_entry(cqm_handle) == CQM_FAIL) + goto err; + + return CQM_SUCCESS; + +err: + for (j = 0; j < i; j++) { + cla_table = &bat_table->entry[j]; + if (cla_table->type != CQM_BAT_ENTRY_T_INVALID) { + cqm_buf_free_cache_inv(cqm_handle, + &cla_table->cla_x_buf, + &inv_flag); + cqm_buf_free_cache_inv(cqm_handle, + &cla_table->cla_y_buf, + &inv_flag); + cqm_buf_free_cache_inv(cqm_handle, + &cla_table->cla_z_buf, + &inv_flag); + } + } + + return CQM_FAIL; +} + +void cqm_cla_uninit(struct cqm_handle_s *cqm_handle) +{ + struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table; + struct cqm_cla_table_s *cla_table = NULL; + s32 inv_flag = 0; + u32 i = 0; + + for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { + cla_table = &bat_table->entry[i]; + if (cla_table->type != CQM_BAT_ENTRY_T_INVALID) { + cqm_buf_free_cache_inv(cqm_handle, + &cla_table->cla_x_buf, + &inv_flag); + cqm_buf_free_cache_inv(cqm_handle, + &cla_table->cla_y_buf, + &inv_flag); + cqm_buf_free_cache_inv(cqm_handle, + &cla_table->cla_z_buf, + &inv_flag); + } + } +} + +s32 cqm_cla_update(struct cqm_handle_s *cqm_handle, + struct cqm_buf_list_s *buf_node_parent, + struct cqm_buf_list_s *buf_node_child, + u32 child_index, u8 cla_update_mode) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_cmd_buf_s *buf_in = NULL; + struct cqm_cla_update_cmd_s *cmd = NULL; + dma_addr_t pa = 0; + s32 ret = CQM_FAIL; + u8 gpa_check_enable = cqm_handle->func_capability.gpa_check_enable; + + buf_in = cqm_cmd_alloc((void *)(cqm_handle->ex_handle)); + CQM_PTR_CHECK_RET(buf_in, return CQM_FAIL, CQM_ALLOC_FAIL(buf_in)); + buf_in->size = sizeof(struct cqm_cla_update_cmd_s); + + /* Fill the command format and convert to big endian */ + cmd = (struct cqm_cla_update_cmd_s *)(buf_in->buf); + + pa = buf_node_parent->pa + (child_index * sizeof(dma_addr_t)); + cmd->gpa_h = CQM_ADDR_HI(pa); + cmd->gpa_l = CQM_ADDR_LW(pa); + + pa = buf_node_child->pa; + cmd->value_h = CQM_ADDR_HI(pa); + cmd->value_l = CQM_ADDR_LW(pa); + + cqm_dbg("Cla alloc: cqm_cla_update, gpa=0x%x 0x%x, value=0x%x 0x%x, cla_update_mode=0x%x\n", + cmd->gpa_h, cmd->gpa_l, cmd->value_h, cmd->value_l, + cla_update_mode); + + /* CLA GPA check */ + if (gpa_check_enable) { + switch (cla_update_mode) { + /* gpa[0]=1 means this GPA is valid */ + case CQM_CLA_RECORD_NEW_GPA: + cmd->value_l |= 1; + break; + /* gpa[0]=0 means this GPA is invalid */ + case CQM_CLA_DEL_GPA_WITHOUT_CACHE_INVALID: + case CQM_CLA_DEL_GPA_WITH_CACHE_INVALID: + cmd->value_l &= (~1); + break; + default: + cqm_err(handle->dev_hdl, + "Cla alloc: cqm_cla_update, wrong cla_update_mode=%u\n", + cla_update_mode); + break; + } + } + + cqm_swab32((u8 *)cmd, (sizeof(struct cqm_cla_update_cmd_s) >> 2)); + + ret = cqm_send_cmd_box((void *)(cqm_handle->ex_handle), + CQM_CMD_ACK_TYPE_CMDQ, + CQM_MOD_CQM, CQM_CMD_T_CLA_UPDATE, + buf_in, NULL, CQM_CMD_TIMEOUT); + if (ret != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_send_cmd_box)); + cqm_err(handle->dev_hdl, + "Cla alloc: cqm_cla_update, cqm_send_cmd_box_ret=%d\n", + ret); + cqm_err(handle->dev_hdl, "Cla alloc: cqm_cla_update, cla_update_cmd: 0x%x 0x%x 0x%x 0x%x\n", + cmd->gpa_h, cmd->gpa_l, cmd->value_h, cmd->value_l); + cqm_cmd_free((void *)(cqm_handle->ex_handle), buf_in); + return CQM_FAIL; + } + +
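/* The chip has acknowledged the CLA update; free the command buffer. */ +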
cqm_cmd_free((void *)(cqm_handle->ex_handle), buf_in); + return CQM_SUCCESS; +} + +/** + * cqm_cla_alloc - Allocate a CLA trunk page + * @cqm_handle: cqm handle + * @cla_table: cla handle + * @buf_node_parent: the parent node whose content is to be updated + * @buf_node_child: the child node whose content is to be allocated + * @child_index: child index + * Return: 0 - success, negative - failure + */ +s32 cqm_cla_alloc(struct cqm_handle_s *cqm_handle, + struct cqm_cla_table_s *cla_table, + struct cqm_buf_list_s *buf_node_parent, + struct cqm_buf_list_s *buf_node_child, u32 child_index) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + s32 ret = CQM_FAIL; + + /* Allocate trunk page */ + buf_node_child->va = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + cla_table->trunk_order); + CQM_PTR_CHECK_RET(buf_node_child->va, return CQM_FAIL, + CQM_ALLOC_FAIL(va)); + + /* pci mapping */ + buf_node_child->pa = + pci_map_single(cqm_handle->dev, buf_node_child->va, + PAGE_SIZE << cla_table->trunk_order, + PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(cqm_handle->dev, buf_node_child->pa)) { + cqm_err(handle->dev_hdl, CQM_MAP_FAIL(buf_node_child->pa)); + goto err1; + } + + /* Notify the chip of trunk_pa and + * let it fill in the cla table entry + */ + ret = cqm_cla_update(cqm_handle, buf_node_parent, + buf_node_child, child_index, + CQM_CLA_RECORD_NEW_GPA); + if (ret != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_update)); + goto err2; + } + + return CQM_SUCCESS; + +err2: + pci_unmap_single(cqm_handle->dev, buf_node_child->pa, + PAGE_SIZE << cla_table->trunk_order, + PCI_DMA_BIDIRECTIONAL); +err1: + free_pages((ulong)(buf_node_child->va), cla_table->trunk_order); + buf_node_child->va = NULL; + return CQM_FAIL; +} + +void cqm_cla_free(struct cqm_handle_s *cqm_handle, + struct cqm_cla_table_s *cla_table, + struct cqm_buf_list_s *buf_node_parent, + struct cqm_buf_list_s *buf_node_child, + u32 child_index, u8 cla_update_mode) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + + cqm_dbg("Cla free: cla_update_mode=%u\n", cla_update_mode); + + if (cqm_cla_update(cqm_handle, buf_node_parent, + buf_node_child, child_index, + cla_update_mode) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_update)); + return; + } + + if (cla_update_mode == CQM_CLA_DEL_GPA_WITH_CACHE_INVALID) { + if (cqm_cla_cache_invalid( + cqm_handle, buf_node_child->pa, + PAGE_SIZE << cla_table->trunk_order) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_cla_cache_invalid)); + return; + } + } + + /* Unblock the pci mapping of the trunk page */ + pci_unmap_single(cqm_handle->dev, buf_node_child->pa, + PAGE_SIZE << cla_table->trunk_order, + PCI_DMA_BIDIRECTIONAL); + + /* Free trunk page */ + free_pages((ulong)(buf_node_child->va), cla_table->trunk_order); + buf_node_child->va = NULL; +} + +/** + * cqm_static_qpc_cla_get - When QPC is a static allocation, allocate the count + * of buffer from the index position in the cla table without lock + * @cqm_handle: cqm handle + * @cla_table: cla handle + * @index: the index of table + * @count: the count of buffer + * @pa: the physical address + * Return: the virtual address + */ +u8 *cqm_static_qpc_cla_get(struct cqm_handle_s *cqm_handle, + struct cqm_cla_table_s *cla_table, + u32 index, u32 count, dma_addr_t *pa) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_buf_s *cla_y_buf = &cla_table->cla_y_buf; + struct cqm_buf_s *cla_z_buf = &cla_table->cla_z_buf; + struct cqm_buf_list_s *buf_node_z 
= NULL; + u32 x_index = 0; + u32 y_index = 0; + u32 z_index = 0; + u32 trunk_size = PAGE_SIZE << cla_table->trunk_order; + u8 *ret_addr = NULL; + u32 offset = 0; + + if (cla_table->cla_lvl == CQM_CLA_LVL_0) { + offset = index * cla_table->obj_size; + ret_addr = (u8 *)(cla_z_buf->buf_list->va) + offset; + *pa = cla_z_buf->buf_list->pa + offset; + } else if (cla_table->cla_lvl == CQM_CLA_LVL_1) { + z_index = index & ((1 << (cla_table->z + 1)) - 1); + y_index = index >> (cla_table->z + 1); + + if (y_index >= cla_z_buf->buf_number) { + cqm_err(handle->dev_hdl, + "Static qpc cla get: index exceeds buf_number, y_index %u, z_buf_number %u\n", + y_index, cla_z_buf->buf_number); + return NULL; + } + buf_node_z = &cla_z_buf->buf_list[y_index]; + if (!buf_node_z->va) { + cqm_err(handle->dev_hdl, "Cla get: static qpc cla_z_buf[%u].va=NULL\n", + y_index); + return NULL; + } + buf_node_z->refcount += count; + offset = z_index * cla_table->obj_size; + ret_addr = (u8 *)(buf_node_z->va) + offset; + *pa = buf_node_z->pa + offset; + } else { + z_index = index & ((1 << (cla_table->z + 1)) - 1); + y_index = (index >> (cla_table->z + 1)) & + ((1 << (cla_table->y - cla_table->z)) - 1); + x_index = index >> (cla_table->y + 1); + + if ((x_index >= cla_y_buf->buf_number) || + ((x_index * (trunk_size / sizeof(dma_addr_t)) + y_index) >= + cla_z_buf->buf_number)) { + cqm_err(handle->dev_hdl, + "Static qpc cla get: index exceeds buf_number,x_index %u, y_index %u, y_buf_number %u, z_buf_number %u\n ", + x_index, y_index, cla_y_buf->buf_number, + cla_z_buf->buf_number); + return NULL; + } + + buf_node_z = &(cla_z_buf->buf_list[x_index * + (trunk_size / sizeof(dma_addr_t)) + y_index]); + if (!buf_node_z->va) { + cqm_err(handle->dev_hdl, "Cla get: static qpc cla_z_buf.va=NULL\n"); + return NULL; + } + + buf_node_z->refcount += count; + offset = z_index * cla_table->obj_size; + ret_addr = (u8 *)(buf_node_z->va) + offset; + *pa = buf_node_z->pa + offset; + } + + return ret_addr; +} + +static s32 cqm_cla_get_level_two(struct cqm_handle_s *cqm_handle, + struct cqm_cla_table_s *cla_table, + u32 index, u32 count, + dma_addr_t *pa, u8 **ret_addr) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_buf_s *cla_x_buf = &cla_table->cla_x_buf; + struct cqm_buf_s *cla_y_buf = &cla_table->cla_y_buf; + struct cqm_buf_s *cla_z_buf = &cla_table->cla_z_buf; + struct cqm_buf_list_s *buf_node_x = NULL; + struct cqm_buf_list_s *buf_node_y = NULL; + struct cqm_buf_list_s *buf_node_z = NULL; + u32 x_index = 0; + u32 y_index = 0; + u32 z_index = 0; + u32 trunk_size = PAGE_SIZE << cla_table->trunk_order; + u32 offset = 0; + + z_index = index & ((1 << (cla_table->z + 1)) - 1); + y_index = (index >> (cla_table->z + 1)) & + ((1 << (cla_table->y - cla_table->z)) - 1); + x_index = index >> (cla_table->y + 1); + + if ((x_index >= cla_y_buf->buf_number) || + ((x_index * (trunk_size / sizeof(dma_addr_t)) + y_index) >= + cla_z_buf->buf_number)) { + cqm_err(handle->dev_hdl, + "Cla get: index exceeds buf_number, x_index %u, y_index %u, y_buf_number %u, z_buf_number %u\n", + x_index, y_index, cla_y_buf->buf_number, + cla_z_buf->buf_number); + return CQM_FAIL; + } + + buf_node_x = cla_x_buf->buf_list; + buf_node_y = &cla_y_buf->buf_list[x_index]; + buf_node_z = &(cla_z_buf->buf_list[x_index * + (trunk_size / sizeof(dma_addr_t)) + y_index]); + + /* Y buf node does not exist, allocates y node page */ + if (!buf_node_y->va) { + if (cqm_cla_alloc( + cqm_handle, cla_table, + buf_node_x, buf_node_y, x_index) == CQM_FAIL) { + cqm_err(handle->dev_hdl, 
+ CQM_FUNCTION_FAIL(cqm_cla_alloc)); + return CQM_FAIL; + } + } + + /* Z buf node does not exist, allocates z node page */ + if (!buf_node_z->va) { + if (cqm_cla_alloc(cqm_handle, + cla_table, + buf_node_y, + buf_node_z, + y_index) == CQM_FAIL) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_cla_alloc)); + if (buf_node_y->refcount == 0) { + /* Free y node, needs cache_invalid */ + cqm_cla_free( + cqm_handle, cla_table, + buf_node_x, buf_node_y, x_index, + CQM_CLA_DEL_GPA_WITH_CACHE_INVALID); + } + return CQM_FAIL; + } + + cqm_dbg("Cla get: 2L: y_refcount=0x%x\n", buf_node_y->refcount); + /* Y buf node's reference count should be +1 */ + buf_node_y->refcount++; + } + + cqm_dbg("Cla get: 2L: z_refcount=0x%x, count=0x%x\n", + buf_node_z->refcount, count); + buf_node_z->refcount += count; + offset = z_index * cla_table->obj_size; + *ret_addr = (u8 *)(buf_node_z->va) + offset; + *pa = buf_node_z->pa + offset; + + return CQM_SUCCESS; +} + +static s32 cqm_cla_get_level_one(struct cqm_handle_s *cqm_handle, + struct cqm_cla_table_s *cla_table, + u32 index, u32 count, dma_addr_t *pa, + u8 **ret_addr) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_buf_s *cla_y_buf = &cla_table->cla_y_buf; + struct cqm_buf_s *cla_z_buf = &cla_table->cla_z_buf; + struct cqm_buf_list_s *buf_node_y = NULL; + struct cqm_buf_list_s *buf_node_z = NULL; + u32 y_index = 0; + u32 z_index = 0; + u32 offset = 0; + + z_index = index & ((1 << (cla_table->z + 1)) - 1); + y_index = index >> (cla_table->z + 1); + + if (y_index >= cla_z_buf->buf_number) { + cqm_err(handle->dev_hdl, + "Cla get: index exceeds buf_number, y_index %u, z_buf_number %u\n", + y_index, cla_z_buf->buf_number); + return CQM_FAIL; + } + buf_node_z = &cla_z_buf->buf_list[y_index]; + buf_node_y = cla_y_buf->buf_list; + + /* Z buf node does not exist, first allocate the page */ + if (!buf_node_z->va) { + if (cqm_cla_alloc(cqm_handle, + cla_table, + buf_node_y, + buf_node_z, + y_index) == CQM_FAIL) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_cla_alloc)); + cqm_err(handle->dev_hdl, + "Cla get: cla_table->type=%u\n", + cla_table->type); + return CQM_FAIL; + } + } + + cqm_dbg("Cla get: 1L: z_refcount=0x%x, count=0x%x\n", + buf_node_z->refcount, count); + buf_node_z->refcount += count; + offset = z_index * cla_table->obj_size; + *ret_addr = (u8 *)(buf_node_z->va) + offset; + *pa = buf_node_z->pa + offset; + + return CQM_SUCCESS; +} + +/** + * cqm_cla_get - Allocate the count of buffer from the index position in the + * cla table + * @cqm_handle: cqm handle + * @cla_table: cla table + * @index: the index of table + * @count: the count of buffer + * @pa: the physical address + * Return: the virtual address + */ +u8 *cqm_cla_get(struct cqm_handle_s *cqm_handle, + struct cqm_cla_table_s *cla_table, u32 index, + u32 count, dma_addr_t *pa) +{ + struct cqm_buf_s *cla_z_buf = &cla_table->cla_z_buf; + u8 *ret_addr = NULL; + u32 offset = 0; + + mutex_lock(&cla_table->lock); + if (cla_table->cla_lvl == CQM_CLA_LVL_0) { + /* Level 0 CLA pages are statically allocated */ + offset = index * cla_table->obj_size; + ret_addr = (u8 *)(cla_z_buf->buf_list->va) + offset; + *pa = cla_z_buf->buf_list->pa + offset; + } else if (cla_table->cla_lvl == CQM_CLA_LVL_1) { + if (cqm_cla_get_level_one(cqm_handle, cla_table, + index, count, + pa, &ret_addr) == CQM_FAIL) { + mutex_unlock(&cla_table->lock); + return NULL; + } + } else { + if (cqm_cla_get_level_two(cqm_handle, + cla_table, + index, + count, + pa, + &ret_addr) == CQM_FAIL) { + 
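/* Level-2 lookup or allocation failed; drop the lock and return NULL. */ +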
mutex_unlock(&cla_table->lock); + return NULL; + } + } + + mutex_unlock(&cla_table->lock); + return ret_addr; +} + +/** + * cqm_cla_put -Decrease the reference count of the trunk page, if it is reduced + * to 0, release the trunk page + * @cqm_handle: cqm handle + * @cla_table: cla table + * @index: the index of table + * @count: the count of buffer + */ +void cqm_cla_put(struct cqm_handle_s *cqm_handle, + struct cqm_cla_table_s *cla_table, + u32 index, u32 count) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_buf_s *cla_x_buf = &cla_table->cla_x_buf; + struct cqm_buf_s *cla_y_buf = &cla_table->cla_y_buf; + struct cqm_buf_s *cla_z_buf = &cla_table->cla_z_buf; + struct cqm_buf_list_s *buf_node_x = NULL; + struct cqm_buf_list_s *buf_node_y = NULL; + struct cqm_buf_list_s *buf_node_z = NULL; + u32 x_index = 0; + u32 y_index = 0; + u32 trunk_size = PAGE_SIZE << cla_table->trunk_order; + + /* Buffer is statically allocated, + * no need to control the reference count + */ + if (cla_table->alloc_static == true) + return; + + mutex_lock(&cla_table->lock); + + if (cla_table->cla_lvl == CQM_CLA_LVL_1) { + y_index = index >> (cla_table->z + 1); + + if (y_index >= cla_z_buf->buf_number) { + cqm_err(handle->dev_hdl, + "Cla put: index exceeds buf_number, y_index %u, z_buf_number %u\n", + y_index, cla_z_buf->buf_number); + cqm_err(handle->dev_hdl, + "Cla put: cla_table->type=%u\n", + cla_table->type); + mutex_unlock(&cla_table->lock); + return; + } + + buf_node_z = &cla_z_buf->buf_list[y_index]; + buf_node_y = cla_y_buf->buf_list; + + /* When the z node page reference count is 0, + * release the z node page + */ + cqm_dbg("Cla put: 1L: z_refcount=0x%x, count=0x%x\n", + buf_node_z->refcount, count); + buf_node_z->refcount -= count; + if (buf_node_z->refcount == 0) { + /* Z node does not need cache invalid */ + cqm_cla_free(cqm_handle, cla_table, buf_node_y, + buf_node_z, y_index, + CQM_CLA_DEL_GPA_WITHOUT_CACHE_INVALID); + } + } else if (cla_table->cla_lvl == CQM_CLA_LVL_2) { + y_index = (index >> (cla_table->z + 1)) & + ((1 << (cla_table->y - cla_table->z)) - 1); + x_index = index >> (cla_table->y + 1); + + if ((x_index >= cla_y_buf->buf_number) || + ((x_index * (trunk_size / sizeof(dma_addr_t)) + y_index) >= + cla_z_buf->buf_number)) { + cqm_err(handle->dev_hdl, + "Cla put: index exceeds buf_number, x_index %u, y_index %u, y_buf_number %u, z_buf_number %u\n", + x_index, y_index, cla_y_buf->buf_number, + cla_z_buf->buf_number); + mutex_unlock(&cla_table->lock); + return; + } + + buf_node_x = cla_x_buf->buf_list; + buf_node_y = &cla_y_buf->buf_list[x_index]; + buf_node_z = &(cla_z_buf->buf_list[x_index * + (trunk_size / sizeof(dma_addr_t)) + y_index]); + cqm_dbg("Cla put: 2L: z_refcount=0x%x, count=0x%x\n", + buf_node_z->refcount, count); + + /* When the z node page reference count is 0, + * release the z node page + */ + buf_node_z->refcount -= count; + if (buf_node_z->refcount == 0) { + cqm_cla_free(cqm_handle, cla_table, buf_node_y, + buf_node_z, y_index, + CQM_CLA_DEL_GPA_WITHOUT_CACHE_INVALID); + + /* When the y node page reference count is 0, + * release the y node page + */ + cqm_dbg("Cla put: 2L: y_refcount=0x%x\n", + buf_node_y->refcount); + buf_node_y->refcount--; + if (buf_node_y->refcount == 0) { + /* Y node needs cache invalid */ + cqm_cla_free( + cqm_handle, cla_table, buf_node_x, + buf_node_y, x_index, + CQM_CLA_DEL_GPA_WITH_CACHE_INVALID); + } + } + } + + mutex_unlock(&cla_table->lock); +} + +/** + * cqm_cla_table_get - Find the CLA table structure corresponding to a 
BAT entry + * @bat_table: bat table + * @entry_type: entry type + * @count: the count of buffer + * Return: the CLA table + */ +struct cqm_cla_table_s *cqm_cla_table_get(struct cqm_bat_table_s *bat_table, + u32 entry_type) +{ + struct cqm_cla_table_s *cla_table = NULL; + u32 i = 0; + + for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { + cla_table = &bat_table->entry[i]; + if (entry_type == cla_table->type) + return cla_table; + } + + return NULL; +} + +#define bitmap_section + +/** + * __cqm_bitmap_init - Initialize a bitmap + * @bitmap: cqm bitmap table + * Return: 0 - success, negative - failure + */ +s32 __cqm_bitmap_init(struct cqm_bitmap_s *bitmap) +{ + spin_lock_init(&bitmap->lock); + + /* The max_num of bitmap is aligned by 8, and then shifted right by + * 3bits to get how many Bytes are needed + */ + bitmap->table = + (ulong *)vmalloc((ALIGN(bitmap->max_num, 8) >> 3)); + CQM_PTR_CHECK_RET(bitmap->table, return CQM_FAIL, + CQM_ALLOC_FAIL(bitmap->table)); + memset(bitmap->table, 0, (ALIGN(bitmap->max_num, 8) >> 3)); + + return CQM_SUCCESS; +} + +static s32 cqm_bitmap_init_by_type(struct cqm_handle_s *cqm_handle, + struct cqm_cla_table_s *cla_table, + struct cqm_bitmap_s *bitmap) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_func_capability_s *capability = &cqm_handle->func_capability; + s32 ret = CQM_SUCCESS; + + switch (cla_table->type) { + case CQM_BAT_ENTRY_T_QPC: + bitmap->max_num = capability->qpc_number; + bitmap->reserved_top = capability->qpc_reserved; + bitmap->last = capability->qpc_reserved; + cqm_info(handle->dev_hdl, "Bitmap init: cla_table_type=%u, max_num=0x%x\n", + cla_table->type, bitmap->max_num); + ret = __cqm_bitmap_init(bitmap); + break; + case CQM_BAT_ENTRY_T_MPT: + bitmap->max_num = capability->mpt_number; + bitmap->reserved_top = capability->mpt_reserved; + bitmap->last = capability->mpt_reserved; + cqm_info(handle->dev_hdl, "Bitmap init: cla_table_type=%u, max_num=0x%x\n", + cla_table->type, bitmap->max_num); + ret = __cqm_bitmap_init(bitmap); + break; + case CQM_BAT_ENTRY_T_SCQC: + bitmap->max_num = capability->scqc_number; + bitmap->reserved_top = capability->scq_reserved; + bitmap->last = capability->scq_reserved; + cqm_info(handle->dev_hdl, "Bitmap init: cla_table_type=%u, max_num=0x%x\n", + cla_table->type, bitmap->max_num); + ret = __cqm_bitmap_init(bitmap); + break; + case CQM_BAT_ENTRY_T_SRQC: + bitmap->max_num = capability->srqc_number; + bitmap->reserved_top = 0; + bitmap->last = 0; + cqm_info(handle->dev_hdl, "Bitmap init: cla_table_type=%u, max_num=0x%x\n", + cla_table->type, bitmap->max_num); + ret = __cqm_bitmap_init(bitmap); + break; + default: + break; + } + + return ret; +} + +/** + * cqm_bitmap_init - Initialize a bitmap + * @cqm_handle: cqm handle + * Return: 0 - success, negative - failure + */ +s32 cqm_bitmap_init(struct cqm_handle_s *cqm_handle) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table; + struct cqm_cla_table_s *cla_table = NULL; + struct cqm_bitmap_s *bitmap = NULL; + u32 i = 0; + s32 ret = CQM_SUCCESS; + + for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { + cla_table = &bat_table->entry[i]; + if (cla_table->obj_num == 0) { + cqm_info(handle->dev_hdl, "Cla alloc: cla_type %u, obj_num=0, don't init bitmap\n", + cla_table->type); + continue; + } + + bitmap = &cla_table->bitmap; + ret = cqm_bitmap_init_by_type(cqm_handle, cla_table, bitmap); + if (ret != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, "Bitmap init: failed to init cla_table_type=%u, 
obj_num=0x%x\n", + cla_table->type, cla_table->obj_num); + goto err; + } + } + + return CQM_SUCCESS; + +err: + cqm_bitmap_uninit(cqm_handle); + return CQM_FAIL; +} + +/** + * cqm_bitmap_uninit - Uninitialize a bitmap + * @cqm_handle: cqm handle + */ +void cqm_bitmap_uninit(struct cqm_handle_s *cqm_handle) +{ + struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table; + struct cqm_cla_table_s *cla_table = NULL; + struct cqm_bitmap_s *bitmap = NULL; + u32 i = 0; + + for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { + cla_table = &bat_table->entry[i]; + bitmap = &cla_table->bitmap; + if (cla_table->type != CQM_BAT_ENTRY_T_INVALID) { + if (bitmap->table) { + vfree(bitmap->table); + bitmap->table = NULL; + } + } + } +} + +/** + * cqm_bitmap_check_range - Starting from begin, check whether count bits are + * free in the table, required: 1. This set of bits cannot cross step, 2. This + * group of bits must be 0 + * @table: bitmap table + * @step: steps + * @max_num: max num + * @begin: begin position + * @count: the count of bit to check + * Return: If check valid return begin position + */ +u32 cqm_bitmap_check_range(const ulong *table, u32 step, + u32 max_num, u32 begin, u32 count) +{ + u32 i = 0; + u32 end = (begin + (count - 1)); + + /* Single bit is not checked */ + if (count == 1) + return begin; + + /* End is out of bounds */ + if (end >= max_num) + return max_num; + + /* Bit check, if there is a bit other than 0, return next bit */ + for (i = (begin + 1); i <= end; i++) { + if (test_bit((s32)i, table)) + return i + 1; + } + + /* Check if it is in a different step */ + if ((begin & (~(step - 1))) != (end & (~(step - 1)))) + return (end & (~(step - 1))); + + /* If check pass, return begin position */ + return begin; +} + +static void cqm_bitmap_set_bit(struct cqm_bitmap_s *bitmap, u32 index, + u32 max_num, u32 count, bool update_last, + ulong *table) +{ + u32 i; + + /* Set 1 to the found bit and reset last */ + if (index < max_num) { + for (i = index; i < (index + count); i++) + set_bit(i, table); + + if (update_last) { + bitmap->last = (index + count); + if (bitmap->last >= bitmap->max_num) + bitmap->last = bitmap->reserved_top; + } + } +} + +/** + * cqm_bitmap_alloc - Allocate a bitmap index, 0 and 1 should not be used, Scan + * back from the place where you last applied, and needs to support the + * application of a series of consecutive indexes, and should not to cross trunk + * @table: bitmap table + * @step: steps + * @count: the count of bit to check + * @update_last: update last + * Return: Success - return the index, failure - return the max + */ +u32 cqm_bitmap_alloc(struct cqm_bitmap_s *bitmap, u32 step, u32 count, + bool update_last) +{ + u32 index = 0; + u32 max_num = bitmap->max_num; + u32 last = bitmap->last; + ulong *table = bitmap->table; + + spin_lock(&bitmap->lock); + + /* Search for a free bit from the last position */ + do { + index = find_next_zero_bit(table, max_num, last); + if (index < max_num) { + last = cqm_bitmap_check_range(table, step, + max_num, index, count); + } else { + break; + } + } while (last != index); + + /* The above search failed, search for a free bit from the beginning */ + if (index >= max_num) { + last = bitmap->reserved_top; + do { + index = find_next_zero_bit(table, max_num, last); + if (index < max_num) { + last = cqm_bitmap_check_range(table, step, + max_num, + index, count); + } else { + break; + } + } while (last != index); + } + cqm_bitmap_set_bit(bitmap, index, max_num, count, update_last, table); + spin_unlock(&bitmap->lock); + return index; 
+} + +/** + * cqm_bitmap_alloc_reserved - Allocate the reserve bit according to index + * @bitmap: bitmap table + * @count: count + * @index: the index of bitmap + * Return: Success - return the index, failure - return the max + */ +u32 cqm_bitmap_alloc_reserved(struct cqm_bitmap_s *bitmap, u32 count, u32 index) +{ + ulong *table = bitmap->table; + u32 ret_index = CQM_INDEX_INVALID; + + if ((index >= bitmap->reserved_top) || (index >= bitmap->max_num) || + (count != 1)) { + return CQM_INDEX_INVALID; + } + + spin_lock(&bitmap->lock); + + if (test_bit(index, table)) { + ret_index = CQM_INDEX_INVALID; + } else { + set_bit(index, table); + ret_index = index; + } + + spin_unlock(&bitmap->lock); + return ret_index; +} + +/** + * cqm_bitmap_free - Release a bitmap index + * @bitmap: bitmap table + * @index: the index of bitmap + * @count: count + */ +void cqm_bitmap_free(struct cqm_bitmap_s *bitmap, u32 index, u32 count) +{ + ulong i = 0; + + spin_lock(&bitmap->lock); + + for (i = index; i < (index + count); i++) + clear_bit((s32)i, bitmap->table); + + spin_unlock(&bitmap->lock); +} + +#define obj_table_section + +/** + * _cqm_object_table_init - Initialize a table of object and index + * @cqm_handle: cqm handle + * Return: 0 - success, negative - failure + */ +s32 __cqm_object_table_init(struct cqm_object_table_s *obj_table) +{ + rwlock_init(&obj_table->lock); + + obj_table->table = (struct cqm_object_s **)vmalloc(obj_table->max_num * + sizeof(void *)); + CQM_PTR_CHECK_RET(obj_table->table, return CQM_FAIL, + CQM_ALLOC_FAIL(table)); + memset(obj_table->table, 0, obj_table->max_num * sizeof(void *)); + return CQM_SUCCESS; +} + +/** + * cqm_object_table_init - Initialize the association table of object and index + * @cqm_handle: cqm handle + * Return: 0 - success, negative - failure + */ +s32 cqm_object_table_init(struct cqm_handle_s *cqm_handle) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_func_capability_s *capability = &cqm_handle->func_capability; + struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table; + struct cqm_cla_table_s *cla_table = NULL; + struct cqm_object_table_s *obj_table = NULL; + s32 ret = CQM_SUCCESS; + u32 i = 0; + + for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { + cla_table = &bat_table->entry[i]; + if (cla_table->obj_num == 0) { + cqm_info(handle->dev_hdl, + "Obj table init: cla_table_type %u, obj_num=0, don't init obj table\n", + cla_table->type); + continue; + } + + obj_table = &cla_table->obj_table; + + switch (cla_table->type) { + case CQM_BAT_ENTRY_T_QPC: + obj_table->max_num = capability->qpc_number; + ret = __cqm_object_table_init(obj_table); + break; + case CQM_BAT_ENTRY_T_MPT: + obj_table->max_num = capability->mpt_number; + ret = __cqm_object_table_init(obj_table); + break; + case CQM_BAT_ENTRY_T_SCQC: + obj_table->max_num = capability->scqc_number; + ret = __cqm_object_table_init(obj_table); + break; + case CQM_BAT_ENTRY_T_SRQC: + obj_table->max_num = capability->srqc_number; + ret = __cqm_object_table_init(obj_table); + break; + default: + break; + } + + if (ret != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, + "Obj table init: failed to init cla_table_type=%u, obj_num=0x%x\n", + cla_table->type, cla_table->obj_num); + goto err; + } + } + + return CQM_SUCCESS; + +err: + cqm_object_table_uninit(cqm_handle); + return CQM_FAIL; +} + +/** + * cqm_object_table_uninit - Deinitialize the association table of object and + * index + * @cqm_handle: cqm handle + */ +void cqm_object_table_uninit(struct cqm_handle_s *cqm_handle) +{ + struct 
cqm_bat_table_s *bat_table = &cqm_handle->bat_table; + struct cqm_cla_table_s *cla_table = NULL; + struct cqm_object_table_s *obj_table = NULL; + u32 i = 0; + + for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { + cla_table = &bat_table->entry[i]; + obj_table = &cla_table->obj_table; + if (cla_table->type != CQM_BAT_ENTRY_T_INVALID) { + if (obj_table->table) { + vfree(obj_table->table); + obj_table->table = NULL; + } + } + } +} + +/** + * cqm_object_table_insert - Insert an object, turn off the soft interrupt + * @cqm_handle: cqm handle + * @object_table: object table + * @index: the index of table + * @obj: the object to insert + * Return: 0 - success, negative - failure + */ +s32 cqm_object_table_insert(struct cqm_handle_s *cqm_handle, + struct cqm_object_table_s *object_table, u32 index, + struct cqm_object_s *obj) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + + if (index >= object_table->max_num) { + cqm_err(handle->dev_hdl, "Obj table insert: index 0x%x exceeds max_num 0x%x\n", + index, object_table->max_num); + return CQM_FAIL; + } + + write_lock(&object_table->lock); + + if (!object_table->table[index]) { + object_table->table[index] = obj; + write_unlock(&object_table->lock); + return CQM_SUCCESS; + } + write_unlock(&object_table->lock); + cqm_err(handle->dev_hdl, "Obj table insert: object_table->table[0x%x] has been inserted\n", + index); + return CQM_FAIL; +} + +/** + * cqm_object_table_remove - Remove an object + * @cqm_handle: cqm handle + * @object_table: object table + * @index: the index of table + * @obj: the object to remove + */ +void cqm_object_table_remove(struct cqm_handle_s *cqm_handle, + struct cqm_object_table_s *object_table, + u32 index, const struct cqm_object_s *obj) +{ + struct hifc_hwdev *handle = cqm_handle->ex_handle; + + if (index >= object_table->max_num) { + cqm_err(handle->dev_hdl, "Obj table remove: index 0x%x exceeds max_num 0x%x\n", + index, object_table->max_num); + return; + } + + write_lock(&object_table->lock); + + if ((object_table->table[index]) && + (object_table->table[index] == obj)) { + object_table->table[index] = NULL; + } else { + cqm_err(handle->dev_hdl, "Obj table remove: object_table->table[0x%x] has been removed\n", + index); + } + + write_unlock(&object_table->lock); +} + +/** + * cqm_srq_used_rq_delete - Delete rq in TOE SRQ mode + * @object: cqm object + */ +void cqm_srq_used_rq_delete(struct cqm_object_s *object) +{ + struct cqm_queue_s *common = container_of(object, struct cqm_queue_s, + object); + struct cqm_nonrdma_qinfo_s *qinfo = container_of( + common, + struct cqm_nonrdma_qinfo_s, + common); + u32 link_wqe_offset = qinfo->wqe_per_buf * qinfo->wqe_size; + struct cqm_srq_linkwqe_s *srq_link_wqe = NULL; + dma_addr_t addr; + struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *) + (common->object.cqm_handle); + struct hifc_hwdev *handle = cqm_handle->ex_handle; + + /* The current SRQ solution does not support releasing an RQ that was + * initialized without a container, which may cause errors when RQ + * resources are released. So the RQ is initialized with exactly one + * container, and only that one container is released here. + */ + CQM_PTR_CHECK_NO_RET( + common->head_container, "Rq del: rq has no container to release\n", + return); + + /* Get current container pa from link wqe, and unmap it */ + srq_link_wqe = (struct cqm_srq_linkwqe_s *)(common->head_container + + link_wqe_offset); + /* Only the link wqe part needs big endian conversion */ + cqm_swab32((u8 *)(srq_link_wqe), sizeof(struct cqm_linkwqe_s) >> 2); + + addr = CQM_ADDR_COMBINE(srq_link_wqe->current_buffer_gpa_h, + srq_link_wqe->current_buffer_gpa_l); + if (addr == 0) { + cqm_err(handle->dev_hdl, "Rq del: buffer physical addr is null\n"); + return; + } + pci_unmap_single(cqm_handle->dev, addr, qinfo->container_size, + PCI_DMA_BIDIRECTIONAL); + + /* Get current container va from link wqe, and free it */ + addr = CQM_ADDR_COMBINE(srq_link_wqe->current_buffer_addr_h, + srq_link_wqe->current_buffer_addr_l); + if (addr == 0) { + cqm_err(handle->dev_hdl, "Rq del: buffer virtual addr is null\n"); + return; + } + kfree((void *)addr); +} + +#define obj_intern_if_section + +/** + * cqm_qpc_mpt_bitmap_alloc - Allocate an index from the bitmap when creating qpc or mpt + * @object: cqm object + * @cla_table: cla table + * Return: 0 - success, negative - failure + */ +s32 cqm_qpc_mpt_bitmap_alloc(struct cqm_object_s *object, + struct cqm_cla_table_s *cla_table) +{ + struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *) + object->cqm_handle; + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_qpc_mpt_s *common = container_of(object, + struct cqm_qpc_mpt_s, + object); + struct cqm_qpc_mpt_info_s *qpc_mpt_info = + container_of( + common, + struct cqm_qpc_mpt_info_s, + common); + struct cqm_bitmap_s *bitmap = &cla_table->bitmap; + u32 index = 0; + u32 count = 0; + + count = (ALIGN(object->object_size, cla_table->obj_size)) / + cla_table->obj_size; + qpc_mpt_info->index_count = count; + + if (qpc_mpt_info->common.xid == CQM_INDEX_INVALID) { + /* Allocate index normally */ + index = cqm_bitmap_alloc( + bitmap, + 1 << (cla_table->z + 1), + count, + cqm_handle->func_capability.xid_alloc_mode); + if (index < bitmap->max_num) { + qpc_mpt_info->common.xid = index; + } else { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_bitmap_alloc)); + return CQM_FAIL; + } + } else { + /* Allocate reserved index */ + index = cqm_bitmap_alloc_reserved( + bitmap, count, + qpc_mpt_info->common.xid); + if (index != qpc_mpt_info->common.xid) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_bitmap_alloc_reserved)); + return CQM_FAIL; + } + } + + return CQM_SUCCESS; +} + +static struct cqm_cla_table_s *cqm_qpc_mpt_prepare_cla_table( + struct cqm_object_s *object) +{ + struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *) + object->cqm_handle; + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table; + + struct cqm_cla_table_s *cla_table = NULL; + + /* Get the corresponding cla table */ + if (object->object_type == CQM_OBJECT_SERVICE_CTX) { + cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_QPC); + } else { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object->object_type)); + return NULL; + } + + CQM_PTR_CHECK_RET(cla_table, return NULL, + CQM_FUNCTION_FAIL(cqm_cla_table_get)); + + /* Allocate an index from the bitmap */ + if (cqm_qpc_mpt_bitmap_alloc(object, cla_table) == CQM_FAIL) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_qpc_mpt_bitmap_alloc)); + return NULL; + } + + return cla_table; +} + +/** + * cqm_qpc_mpt_create - Create qpc or mpt + * @object: cqm object + * Return: 0 - success, negative - failure
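+ * + * Note: the flow below first takes an index from the QPC bitmap (via + * cqm_qpc_mpt_prepare_cla_table), then looks up or maps the matching + * trunk page through the BAT/CLA tables, and finally records the object + * in the index/object table.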
+ */ +s32 cqm_qpc_mpt_create(struct cqm_object_s *object) +{ + struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *) + object->cqm_handle; + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_qpc_mpt_s *common = + container_of(object, struct cqm_qpc_mpt_s, object); + struct cqm_qpc_mpt_info_s *qpc_mpt_info = + container_of(common, struct cqm_qpc_mpt_info_s, common); + struct cqm_cla_table_s *cla_table = NULL; + struct cqm_bitmap_s *bitmap = NULL; + struct cqm_object_table_s *object_table = NULL; + u32 index = 0; + u32 count = 0; + + cla_table = cqm_qpc_mpt_prepare_cla_table(object); + CQM_PTR_CHECK_RET(cla_table, return CQM_FAIL, + CQM_FUNCTION_FAIL(cqm_qpc_mpt_prepare_cla_table)); + + bitmap = &cla_table->bitmap; + index = qpc_mpt_info->common.xid; + count = qpc_mpt_info->index_count; + + /* Find the trunk page from BAT/CLA and allocate the buffer, the + * business needs to ensure that the released buffer has been cleared + */ + if (cla_table->alloc_static == true) { + qpc_mpt_info->common.vaddr = + cqm_static_qpc_cla_get(cqm_handle, cla_table, + index, count, &common->paddr); + } else { + qpc_mpt_info->common.vaddr = + cqm_cla_get(cqm_handle, cla_table, + index, count, &common->paddr); + } + if (!qpc_mpt_info->common.vaddr) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_get)); + cqm_err(handle->dev_hdl, + "Qpc mpt init: qpc mpt vaddr is null, cla_table->alloc_static=%d\n", + cla_table->alloc_static); + goto err1; + } + + /* Associate index with object, FC executes in interrupt context */ + object_table = &cla_table->obj_table; + + if (cqm_object_table_insert(cqm_handle, object_table, index, object) != + CQM_SUCCESS) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_object_table_insert)); + goto err2; + } + + return CQM_SUCCESS; + +err2: + cqm_cla_put(cqm_handle, cla_table, index, count); +err1: + cqm_bitmap_free(bitmap, index, count); + return CQM_FAIL; +} + +/** + * cqm_qpc_mpt_delete - Delete qpc or mpt + * @object: cqm object + */ +void cqm_qpc_mpt_delete(struct cqm_object_s *object) +{ + struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *) + object->cqm_handle; + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_qpc_mpt_s *common = container_of(object, + struct cqm_qpc_mpt_s, + object); + struct cqm_qpc_mpt_info_s *qpc_mpt_info = container_of( + common, + struct cqm_qpc_mpt_info_s, + common); + struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table; + struct cqm_cla_table_s *cla_table = NULL; + struct cqm_bitmap_s *bitmap = NULL; + struct cqm_object_table_s *object_table = NULL; + u32 index = qpc_mpt_info->common.xid; + u32 count = qpc_mpt_info->index_count; + + /* Find the response cla table */ + atomic_inc(&cqm_handle->ex_handle->hw_stats.cqm_stats.cqm_qpc_mpt_delete_cnt); + + if (object->object_type == CQM_OBJECT_SERVICE_CTX) { + cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_QPC); + } else { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object->object_type)); + return; + } + + CQM_PTR_CHECK_NO_RET( + cla_table, CQM_FUNCTION_FAIL(cqm_cla_table_get), return); + + /* Disassociate index with object */ + object_table = &cla_table->obj_table; + + cqm_object_table_remove(cqm_handle, object_table, index, object); + + /* Wait for the completion and ensure that all references to the QPC + * are completed + */ + if (atomic_dec_and_test(&object->refcount)) { + complete(&object->free); + } else { + cqm_err(handle->dev_hdl, + "Qpc mpt del: object is referred by others, has to wait for completion\n"); + } + + /* The QPC static allocation 
needs to be non-blocking, and the service + * guarantees that the QPC is completed when the QPC is deleted + */ + if (cla_table->alloc_static == false) + wait_for_completion(&object->free); + /* Free qpc buffer */ + cqm_cla_put(cqm_handle, cla_table, index, count); + + /* Free index into bitmap */ + bitmap = &cla_table->bitmap; + cqm_bitmap_free(bitmap, index, count); +} + +/** + * cqm_linkwqe_fill - Fill link wqe for non RDMA queue buffer + * @buf: cqm buffer + * @wqe_per_buf: not include link wqe + * @wqe_size: wqe size + * @wqe_number: not include link wqe + * @tail: true linkwqe must be at the tail, false linkwqe may not be at the tail + * @link_mode: link wqe mode + */ +void cqm_linkwqe_fill(struct cqm_buf_s *buf, + u32 wqe_per_buf, + u32 wqe_size, + u32 wqe_number, + bool tail, + u8 link_mode) +{ + struct cqm_linkwqe_s *wqe = NULL; + struct cqm_linkwqe_128b_s *linkwqe = NULL; + u8 *va = NULL; + u32 i = 0; + dma_addr_t addr; + + /* Except for the last buffer, the linkwqe of other buffers is directly + * filled to the tail + */ + for (i = 0; i < buf->buf_number; i++) { + va = (u8 *)(buf->buf_list[i].va); + + if (i != (buf->buf_number - 1)) { + wqe = (struct cqm_linkwqe_s *)(va + (u32)(wqe_size * + wqe_per_buf)); + wqe->wf = CQM_WQE_WF_LINK; + wqe->ctrlsl = CQM_LINK_WQE_CTRLSL_VALUE; + wqe->lp = CQM_LINK_WQE_LP_INVALID; + /* The Obit of valid link wqe needs to be set to 1, and + * each service needs to confirm that o-bit=1 means + * valid, o-bit=0 means invalid + */ + wqe->o = CQM_LINK_WQE_OWNER_VALID; + addr = buf->buf_list[(u32)(i + 1)].pa; + wqe->next_page_gpa_h = CQM_ADDR_HI(addr); + wqe->next_page_gpa_l = CQM_ADDR_LW(addr); + } else { + /* The last buffer of Linkwqe must fill specially */ + if (tail == true) { + /* Must be filled at the end of the page */ + wqe = (struct cqm_linkwqe_s *)(va + + (u32)(wqe_size * wqe_per_buf)); + } else { + /* The last linkwqe is filled immediately after + * the last wqe + */ + wqe = (struct cqm_linkwqe_s *) + (va + (u32)(wqe_size * + (wqe_number - wqe_per_buf * + (buf->buf_number - 1)))); + } + wqe->wf = CQM_WQE_WF_LINK; + wqe->ctrlsl = CQM_LINK_WQE_CTRLSL_VALUE; + + /* In link mode, the last link wqe is invalid, In ring + * mode, the last link wqe is valid, pointing to the + * home page, and lp is set + */ + if (link_mode == CQM_QUEUE_LINK_MODE) { + wqe->o = CQM_LINK_WQE_OWNER_INVALID; + } else { + /* The lp field of the last link_wqe is filled + * with 1,indicating that the o-bit is flipped + * from here + */ + wqe->lp = CQM_LINK_WQE_LP_VALID; + wqe->o = CQM_LINK_WQE_OWNER_VALID; + addr = buf->buf_list[0].pa; + wqe->next_page_gpa_h = CQM_ADDR_HI(addr); + wqe->next_page_gpa_l = CQM_ADDR_LW(addr); + } + } + + if (wqe_size == 128) { + /* The 128B wqe before and after 64B have obit need to be + * assigned, For IFOE, 63th penultimate bit of last 64B is + * obit, for TOE, 157th penultimate bit of last 64B is obit + */ + linkwqe = (struct cqm_linkwqe_128b_s *)wqe; + linkwqe->second_64b.third_16B.bs.toe_o = + CQM_LINK_WQE_OWNER_VALID; + linkwqe->second_64b.forth_16B.bs.ifoe_o = + CQM_LINK_WQE_OWNER_VALID; + + /* big endian conversion */ + cqm_swab32((u8 *)wqe, + sizeof(struct cqm_linkwqe_128b_s) >> 2); + } else { + /* big endian conversion */ + cqm_swab32((u8 *)wqe, + sizeof(struct cqm_linkwqe_s) >> 2); + } + } +} + +static s32 cqm_nonrdma_queue_ctx_create_srq(struct cqm_object_s *object) +{ + struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *) + object->cqm_handle; + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_queue_s *common = 
container_of(object, + struct cqm_queue_s, object); + struct cqm_nonrdma_qinfo_s *qinfo = container_of( + common, + struct cqm_nonrdma_qinfo_s, + common); + s32 shift = 0; + + shift = cqm_shift(qinfo->q_ctx_size); + common->q_ctx_vaddr = (u8 *)cqm_kmalloc_align( + qinfo->q_ctx_size, + GFP_KERNEL | __GFP_ZERO, + (u16)shift); + if (!common->q_ctx_vaddr) { + cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(q_ctx_vaddr)); + return CQM_FAIL; + } + + common->q_ctx_paddr = + pci_map_single(cqm_handle->dev, common->q_ctx_vaddr, + qinfo->q_ctx_size, PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(cqm_handle->dev, common->q_ctx_paddr)) { + cqm_err(handle->dev_hdl, CQM_MAP_FAIL(q_ctx_vaddr)); + cqm_kfree_align(common->q_ctx_vaddr); + common->q_ctx_vaddr = NULL; + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +static s32 cqm_nonrdma_queue_ctx_create_scq(struct cqm_object_s *object) +{ + struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *) + object->cqm_handle; + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_queue_s *common = container_of(object, + struct cqm_queue_s, + object); + struct cqm_nonrdma_qinfo_s *qinfo = container_of( + common, + struct cqm_nonrdma_qinfo_s, + common); + struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table; + struct cqm_cla_table_s *cla_table = NULL; + struct cqm_bitmap_s *bitmap = NULL; + struct cqm_object_table_s *object_table = NULL; + + /* Find the corresponding cla table */ + cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_SCQC); + if (!cla_table) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_table_get)); + return CQM_FAIL; + } + + /* Allocate index for bitmap */ + bitmap = &cla_table->bitmap; + qinfo->index_count = (ALIGN(qinfo->q_ctx_size, cla_table->obj_size)) / + cla_table->obj_size; + qinfo->common.index = cqm_bitmap_alloc(bitmap, 1 << (cla_table->z + 1), + qinfo->index_count, cqm_handle->func_capability.xid_alloc_mode); + if (qinfo->common.index >= bitmap->max_num) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_bitmap_alloc)); + return CQM_FAIL; + } + + /* Find the trunk page from BAT/CLA and allocate buffer */ + common->q_ctx_vaddr = cqm_cla_get(cqm_handle, cla_table, + qinfo->common.index, + qinfo->index_count, + &common->q_ctx_paddr); + if (!common->q_ctx_vaddr) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_get)); + cqm_bitmap_free(bitmap, qinfo->common.index, + qinfo->index_count); + return CQM_FAIL; + } + + /* Associate index with object */ + object_table = &cla_table->obj_table; + + if (cqm_object_table_insert( + cqm_handle, object_table, + qinfo->common.index, object) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_object_table_insert)); + cqm_cla_put(cqm_handle, cla_table, qinfo->common.index, + qinfo->index_count); + cqm_bitmap_free(bitmap, qinfo->common.index, + qinfo->index_count); + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +s32 cqm_nonrdma_queue_ctx_create(struct cqm_object_s *object) +{ + if (object->object_type == CQM_OBJECT_NONRDMA_SRQ) + return cqm_nonrdma_queue_ctx_create_srq(object); + else if (object->object_type == CQM_OBJECT_NONRDMA_SCQ) + return cqm_nonrdma_queue_ctx_create_scq(object); + + return CQM_SUCCESS; +} + +#define CQM_NORDMA_CHECK_WEQ_NUMBER(number) \ + (((number) < CQM_CQ_DEPTH_MIN) || ((number) > CQM_CQ_DEPTH_MAX)) + +/** + * cqm_nonrdma_queue_create - Create queue for non RDMA service + * @buf: cqm object + * Return: 0 - success, negative - failure + */ +s32 cqm_nonrdma_queue_create(struct cqm_object_s *object) +{ + struct cqm_handle_s 
*cqm_handle = (struct cqm_handle_s *) + object->cqm_handle; + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_service_s *service = &cqm_handle->service; + struct cqm_queue_s *common = container_of(object, + struct cqm_queue_s, + object); + struct cqm_nonrdma_qinfo_s *qinfo = container_of( + common, + struct cqm_nonrdma_qinfo_s, + common); + struct cqm_buf_s *q_room_buf = &common->q_room_buf_1; + u32 wqe_number = qinfo->common.object.object_size; + u32 wqe_size = qinfo->wqe_size; + u32 order = service->buf_order; + u32 buf_number = 0; + u32 buf_size = 0; + bool tail = false; /* Whether linkwqe is at the end of the page */ + + /* When creating CQ/SCQ queue, the page size is 4k, linkwqe must be at + * the end of the page + */ + if (object->object_type == CQM_OBJECT_NONRDMA_SCQ) { + /* Depth must be 2^n alignment, depth range is 256~32K */ + if (CQM_NORDMA_CHECK_WEQ_NUMBER(wqe_number)) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(wqe_number)); + return CQM_FAIL; + } + if (cqm_check_align(wqe_number) == false) { + cqm_err(handle->dev_hdl, "Nonrdma queue alloc: wqe_number is not align on 2^n\n"); + return CQM_FAIL; + } + + order = CQM_4K_PAGE_ORDER; /* wqe page is 4k */ + tail = true; /* linkwqe must be at the end of the page */ + buf_size = CQM_4K_PAGE_SIZE; + } else { + buf_size = PAGE_SIZE << order; + } + + /* Calculate how many buffers are required, -1 is to deduct link wqe in + * a buf + */ + qinfo->wqe_per_buf = (buf_size / wqe_size) - 1; + /* The depth from service includes the number of linkwqe */ + buf_number = ALIGN((wqe_size * wqe_number), buf_size) / buf_size; + /* Allocate cqm buffer */ + q_room_buf->buf_number = buf_number; + q_room_buf->buf_size = buf_size; + q_room_buf->page_number = (buf_number << order); + if (cqm_buf_alloc(cqm_handle, q_room_buf, false) == CQM_FAIL) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_buf_alloc)); + return CQM_FAIL; + } + /* Fill link wqe, (wqe_number - buf_number) is the number of wqe without + * linkwqe + */ + cqm_linkwqe_fill(q_room_buf, qinfo->wqe_per_buf, wqe_size, + wqe_number - buf_number, tail, + common->queue_link_mode); + + /* Create queue header */ + qinfo->common.q_header_vaddr = + (struct cqm_queue_header_s *)cqm_kmalloc_align( + sizeof(struct cqm_queue_header_s), + GFP_KERNEL | __GFP_ZERO, CQM_QHEAD_ALIGN_ORDER); + if (!qinfo->common.q_header_vaddr) { + cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(q_header_vaddr)); + goto err1; + } + + common->q_header_paddr = + pci_map_single(cqm_handle->dev, + qinfo->common.q_header_vaddr, + sizeof(struct cqm_queue_header_s), + PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(cqm_handle->dev, common->q_header_paddr)) { + cqm_err(handle->dev_hdl, CQM_MAP_FAIL(q_header_vaddr)); + goto err2; + } + + /* Create queue ctx */ + if (cqm_nonrdma_queue_ctx_create(object) == CQM_FAIL) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_nonrdma_queue_ctx_create)); + goto err3; + } + + return CQM_SUCCESS; + +err3: + pci_unmap_single(cqm_handle->dev, common->q_header_paddr, + sizeof(struct cqm_queue_header_s), + PCI_DMA_BIDIRECTIONAL); +err2: + cqm_kfree_align(qinfo->common.q_header_vaddr); + qinfo->common.q_header_vaddr = NULL; +err1: + cqm_buf_free(q_room_buf, cqm_handle->dev); + return CQM_FAIL; +} + +static void cqm_nonrdma_queue_free_scq_srq(struct cqm_object_s *object, + struct cqm_cla_table_s *cla_table) +{ + struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *) + object->cqm_handle; + struct cqm_queue_s *common = container_of(object, + struct cqm_queue_s, + object); + struct 
cqm_nonrdma_qinfo_s *qinfo = + container_of(common, struct cqm_nonrdma_qinfo_s, common); + struct cqm_buf_s *q_room_buf = &common->q_room_buf_1; + u32 index = qinfo->common.index; + u32 count = qinfo->index_count; + struct cqm_bitmap_s *bitmap = NULL; + + /* If it is in TOE SRQ mode, delete the RQ */ + if (common->queue_link_mode == CQM_QUEUE_TOE_SRQ_LINK_MODE) { + cqm_dbg("Nonrdma queue del: delete srq used rq\n"); + cqm_srq_used_rq_delete(&common->object); + } else { + /* Free it if exists q room */ + cqm_buf_free(q_room_buf, cqm_handle->dev); + } + /* Free SRQ or SCQ ctx */ + if (object->object_type == CQM_OBJECT_NONRDMA_SRQ) { + /* ctx of nonrdma's SRQ is applied independently */ + if (common->q_ctx_vaddr) { + pci_unmap_single(cqm_handle->dev, common->q_ctx_paddr, + qinfo->q_ctx_size, + PCI_DMA_BIDIRECTIONAL); + + cqm_kfree_align(common->q_ctx_vaddr); + common->q_ctx_vaddr = NULL; + } + } else if (object->object_type == CQM_OBJECT_NONRDMA_SCQ) { + /* The ctx of SCQ of nonrdma is managed by BAT/CLA */ + cqm_cla_put(cqm_handle, cla_table, index, count); + + /* Release index into bitmap */ + bitmap = &cla_table->bitmap; + cqm_bitmap_free(bitmap, index, count); + } +} + +/** + * cqm_nonrdma_queue_delete - Free queue for non RDMA service + * @buf: cqm object + */ +void cqm_nonrdma_queue_delete(struct cqm_object_s *object) +{ + struct cqm_handle_s *cqm_handle = (struct cqm_handle_s *) + object->cqm_handle; + struct hifc_hwdev *handle = cqm_handle->ex_handle; + struct cqm_queue_s *common = container_of(object, + struct cqm_queue_s, object); + struct cqm_nonrdma_qinfo_s *qinfo = container_of( + common, + struct cqm_nonrdma_qinfo_s, + common); + struct cqm_bat_table_s *bat_table = &cqm_handle->bat_table; + struct cqm_cla_table_s *cla_table = NULL; + struct cqm_object_table_s *object_table = NULL; + u32 index = qinfo->common.index; + + atomic_inc(&cqm_handle->ex_handle->hw_stats.cqm_stats.cqm_nonrdma_queue_delete_cnt); + + /* SCQ has independent SCQN association */ + if (object->object_type == CQM_OBJECT_NONRDMA_SCQ) { + cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_SCQC); + CQM_PTR_CHECK_NO_RET( + cla_table, + CQM_FUNCTION_FAIL(cqm_cla_table_get), + return); + + /* index and object disassociate */ + object_table = &cla_table->obj_table; + + cqm_object_table_remove(cqm_handle, object_table, + index, object); + } + + /* Wait for the completion and ensure that all references to the QPC + * are completed + */ + if (atomic_dec_and_test(&object->refcount)) + complete(&object->free); + else + cqm_err(handle->dev_hdl, "Nonrdma queue del: object is referred by others, has to wait for completion\n"); + wait_for_completion(&object->free); + + /* Free it if exists q header */ + if (qinfo->common.q_header_vaddr) { + pci_unmap_single(cqm_handle->dev, common->q_header_paddr, + sizeof(struct cqm_queue_header_s), + PCI_DMA_BIDIRECTIONAL); + + cqm_kfree_align(qinfo->common.q_header_vaddr); + qinfo->common.q_header_vaddr = NULL; + } + cqm_nonrdma_queue_free_scq_srq(object, cla_table); +} + +#define obj_extern_if_section + +/** + * cqm_object_qpc_mpt_create - Create QPC and MPT + * @ex_handle: hw dev handle + * @service_type: service type + * @object_type: must be mpt and ctx + * @object_size: the unit is byte + * @object_priv: the private structure for service, can be NULL + * @index: get the reserved qpn based on this value, if wants to automatically + * allocate it, the value should be CQM_INDEX_INVALID + * Return: service ctx + */ +struct cqm_qpc_mpt_s *cqm_object_qpc_mpt_create( + void *ex_handle, 
+	enum cqm_object_type_e object_type,
+	u32 object_size, void *object_priv,
+	u32 index)
+{
+	struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
+	struct cqm_handle_s *cqm_handle = NULL;
+	struct cqm_qpc_mpt_info_s *qpc_mpt_info = NULL;
+	s32 ret = CQM_FAIL;
+
+	CQM_PTR_CHECK_RET(ex_handle, return NULL, CQM_PTR_NULL(ex_handle));
+
+	atomic_inc(&handle->hw_stats.cqm_stats.cqm_qpc_mpt_create_cnt);
+
+	cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl);
+	CQM_PTR_CHECK_RET(cqm_handle, return NULL, CQM_PTR_NULL(cqm_handle));
+
+	/* If the service is not registered, return NULL */
+	if (cqm_handle->service.has_register == false) {
+		cqm_err(handle->dev_hdl, "service is not registered");
+		return NULL;
+	}
+
+	if (object_type != CQM_OBJECT_SERVICE_CTX) {
+		cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object_type));
+		return NULL;
+	}
+
+	qpc_mpt_info = (struct cqm_qpc_mpt_info_s *)
+		       kmalloc(sizeof(struct cqm_qpc_mpt_info_s),
+			       GFP_ATOMIC | __GFP_ZERO);
+	CQM_PTR_CHECK_RET(qpc_mpt_info, return NULL,
+			  CQM_ALLOC_FAIL(qpc_mpt_info));
+
+	qpc_mpt_info->common.object.object_type = object_type;
+	qpc_mpt_info->common.object.object_size = object_size;
+	atomic_set(&qpc_mpt_info->common.object.refcount, 1);
+	init_completion(&qpc_mpt_info->common.object.free);
+	qpc_mpt_info->common.object.cqm_handle = cqm_handle;
+	qpc_mpt_info->common.xid = index;
+	qpc_mpt_info->common.priv = object_priv;
+
+	ret = cqm_qpc_mpt_create(&qpc_mpt_info->common.object);
+	if (ret == CQM_SUCCESS)
+		return &qpc_mpt_info->common;
+
+	cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_qpc_mpt_create));
+	kfree(qpc_mpt_info);
+	return NULL;
+}
+
+/**
+ * cqm_object_fc_srq_create - Create the RQ for FC. The number of valid WQEs
+ * in the queue must meet the requested wqe_number. Because a link WQE can
+ * only be placed at the end of a page, the actual number of valid WQEs may
+ * exceed the request, so the service is informed of the number actually
+ * created.
+ * @ex_handle: hw dev handle
+ * @object_type: must be CQM_OBJECT_NONRDMA_SRQ
+ * @wqe_number: valid wqe number
+ * @wqe_size: wqe size
+ * @object_priv: the private structure for service
+ * Return: srq structure
+ */
+struct cqm_queue_s *cqm_object_fc_srq_create(
+	void *ex_handle,
+	enum cqm_object_type_e object_type,
+	u32 wqe_number, u32 wqe_size,
+	void *object_priv)
+{
+	struct cqm_nonrdma_qinfo_s *nonrdma_qinfo = NULL;
+	struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle;
+	struct cqm_handle_s *cqm_handle = NULL;
+	struct cqm_service_s *service = NULL;
+	u32 valid_wqe_per_buffer = 0;
+	u32 wqe_sum = 0; /* includes linkwqe, normal wqe */
+	u32 buf_size = 0;
+	u32 buf_num = 0;
+	s32 ret = CQM_FAIL;
+
+	CQM_PTR_CHECK_RET(ex_handle, return NULL, CQM_PTR_NULL(ex_handle));
+
+	atomic_inc(&handle->hw_stats.cqm_stats.cqm_fc_srq_create_cnt);
+
+	cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl);
+	CQM_PTR_CHECK_RET(cqm_handle, return NULL, CQM_PTR_NULL(cqm_handle));
+
+	/* service_type must be FC */
+	if (cqm_handle->service.has_register == false) {
+		cqm_err(handle->dev_hdl, "service is not registered\n");
+		return NULL;
+	}
+
+	/* wqe_size cannot exceed PAGE_SIZE and should not be 0, and must be
+	 * 2^n aligned.
+ */ + if ((wqe_size >= PAGE_SIZE) || (cqm_check_align(wqe_size) == false)) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(wqe_size)); + return NULL; + } + + /* FC's RQ is SRQ (unlike TOE's SRQ, fc is that all packets received by + * the stream will be put on the same rq, and TOE's srq is similar to + * rq's resource pool) + */ + if (object_type != CQM_OBJECT_NONRDMA_SRQ) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object_type)); + return NULL; + } + + service = &cqm_handle->service; + buf_size = PAGE_SIZE << (service->buf_order); + valid_wqe_per_buffer = buf_size / wqe_size - 1; /* Minus 1 link wqe */ + buf_num = wqe_number / valid_wqe_per_buffer; + if (wqe_number % valid_wqe_per_buffer != 0) + buf_num++; + + /* Calculate the total number of all wqe */ + wqe_sum = buf_num * (valid_wqe_per_buffer + 1); + nonrdma_qinfo = (struct cqm_nonrdma_qinfo_s *) + kmalloc(sizeof(struct cqm_nonrdma_qinfo_s), + GFP_KERNEL | __GFP_ZERO); + + CQM_PTR_CHECK_RET(nonrdma_qinfo, return NULL, + CQM_ALLOC_FAIL(nonrdma_qinfo)); + + /* Initialize object members */ + nonrdma_qinfo->common.object.object_type = object_type; + /* The total number of all wqe */ + nonrdma_qinfo->common.object.object_size = wqe_sum; + atomic_set(&nonrdma_qinfo->common.object.refcount, 1); + init_completion(&nonrdma_qinfo->common.object.free); + nonrdma_qinfo->common.object.cqm_handle = cqm_handle; + + /* Initialize the doorbell used by the current queue, default is the + * hardware doorbell + */ + nonrdma_qinfo->common.current_q_doorbell = CQM_HARDWARE_DOORBELL; + nonrdma_qinfo->common.queue_link_mode = CQM_QUEUE_RING_MODE; + + /* Initialize external public members */ + nonrdma_qinfo->common.priv = object_priv; + nonrdma_qinfo->common.valid_wqe_num = wqe_sum - buf_num; + + /* Initialize internal private members */ + nonrdma_qinfo->wqe_size = wqe_size; + /* The SRQ for FC, which needs to create ctx */ + nonrdma_qinfo->q_ctx_size = service->service_template.srq_ctx_size; + + ret = cqm_nonrdma_queue_create(&nonrdma_qinfo->common.object); + if (ret == CQM_SUCCESS) + return &nonrdma_qinfo->common; + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_nonrdma_queue_create)); + kfree(nonrdma_qinfo); + return NULL; +} + +static int cqm_object_nonrdma_queue_create_check( + void *ex_handle, + enum cqm_object_type_e object_type, + u32 wqe_size) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + struct cqm_handle_s *cqm_handle = NULL; + + CQM_PTR_CHECK_RET(ex_handle, return CQM_FAIL, CQM_PTR_NULL(ex_handle)); + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_nonrdma_queue_create_cnt); + + cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl); + CQM_PTR_CHECK_RET(cqm_handle, return CQM_FAIL, + CQM_PTR_NULL(cqm_handle)); + + /* If service does not register, returns NULL */ + if (cqm_handle->service.has_register == false) { + cqm_err(handle->dev_hdl, "service is not register\n"); + return CQM_FAIL; + } + /* Wqe size cannot exceed PAGE_SIZE, cannot be 0, and must be 2^n + * aligned. 
cqm_check_align check excludes 0, 1, non 2^n alignment + */ + if ((wqe_size >= PAGE_SIZE) || (cqm_check_align(wqe_size) == false)) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(wqe_size)); + return CQM_FAIL; + } + + /* Supported Nonrdma queue: RQ, SQ, SRQ, CQ, SCQ */ + if ((object_type < CQM_OBJECT_NONRDMA_EMBEDDED_RQ) || + (object_type > CQM_OBJECT_NONRDMA_SCQ)) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object_type)); + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +/** + * cqm_object_nonrdma_queue_create - Create queues for non-RDMA services + * @ex_handle: hw dev handle + * @service_type: service type + * @object_type: can create embedded RQ/SQ/CQ and SRQ/SCQ + * @wqe_number: wqe number, including link wqe + * @wqe_size: wqe size, nust be 2^n + * @object_priv: the private structure for service, can be NULL + * Return: srq structure + */ +struct cqm_queue_s *cqm_object_nonrdma_queue_create( + void *ex_handle, + enum cqm_object_type_e object_type, + u32 wqe_number, u32 wqe_size, + void *object_priv) +{ + struct hifc_hwdev *handle = (struct hifc_hwdev *)ex_handle; + struct cqm_handle_s *cqm_handle = NULL; + struct cqm_nonrdma_qinfo_s *nonrdma_qinfo = NULL; + struct cqm_service_s *service = NULL; + s32 ret = CQM_FAIL; + + cqm_handle = (struct cqm_handle_s *)(handle->cqm_hdl); + if (cqm_object_nonrdma_queue_create_check(ex_handle, + object_type, + wqe_size) == CQM_FAIL) { + return NULL; + } + + nonrdma_qinfo = (struct cqm_nonrdma_qinfo_s *) + kmalloc(sizeof(struct cqm_nonrdma_qinfo_s), + GFP_KERNEL | __GFP_ZERO); + CQM_PTR_CHECK_RET(nonrdma_qinfo, return NULL, + CQM_ALLOC_FAIL(nonrdma_qinfo)); + + /* Initialize object members */ + nonrdma_qinfo->common.object.object_type = object_type; + nonrdma_qinfo->common.object.object_size = wqe_number; + atomic_set(&nonrdma_qinfo->common.object.refcount, 1); + init_completion(&nonrdma_qinfo->common.object.free); + nonrdma_qinfo->common.object.cqm_handle = cqm_handle; + + /* Initialize the doorbell used by the current queue, default is the + * hardware doorbell + */ + nonrdma_qinfo->common.current_q_doorbell = CQM_HARDWARE_DOORBELL; + nonrdma_qinfo->common.queue_link_mode = CQM_QUEUE_RING_MODE; + + /* Initialize external public members */ + nonrdma_qinfo->common.priv = object_priv; + + /* Initialize internal private members */ + nonrdma_qinfo->wqe_size = wqe_size; + service = &cqm_handle->service; + switch (object_type) { + case CQM_OBJECT_NONRDMA_SCQ: + nonrdma_qinfo->q_ctx_size = + service->service_template.scq_ctx_size; + break; + case CQM_OBJECT_NONRDMA_SRQ: + /* The creation for SRQ uses a dedicated interface */ + nonrdma_qinfo->q_ctx_size = + service->service_template.srq_ctx_size; + break; + default: + break; + } + + ret = cqm_nonrdma_queue_create(&nonrdma_qinfo->common.object); + if (ret == CQM_SUCCESS) + return &nonrdma_qinfo->common; + + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_nonrdma_queue_create)); + kfree(nonrdma_qinfo); + return NULL; +} + +s32 cqm_qpc_mpt_delete_ret(struct cqm_object_s *object) +{ + u32 object_type = 0; + + object_type = object->object_type; + switch (object_type) { + case CQM_OBJECT_SERVICE_CTX: + cqm_qpc_mpt_delete(object); + return CQM_SUCCESS; + default: + return CQM_FAIL; + } +} + +s32 cqm_nonrdma_queue_delete_ret(struct cqm_object_s *object) +{ + u32 object_type = 0; + + object_type = object->object_type; + switch (object_type) { + case CQM_OBJECT_NONRDMA_SCQ: + case CQM_OBJECT_NONRDMA_SRQ: + cqm_nonrdma_queue_delete(object); + return CQM_SUCCESS; + default: + return CQM_FAIL; + } +} + +/** + * 
cqm_object_nonrdma_queue_create - Delete the created object, the function + * will sleep and wait for all operations on the object to complete before + * returning + * @object: cqm object + */ +void cqm_object_delete(struct cqm_object_s *object) +{ + struct cqm_handle_s *cqm_handle = NULL; + struct hifc_hwdev *handle = NULL; + + CQM_PTR_CHECK_NO_RET(object, CQM_PTR_NULL(object), return); + if (!object->cqm_handle) { + pr_err("[CQM]Obj del: cqm_handle is null, refcount %d\n", + (int)object->refcount.counter); + kfree(object); + return; + } + cqm_handle = (struct cqm_handle_s *)object->cqm_handle; + + if (!cqm_handle->ex_handle) { + pr_err("[CQM]Obj del: ex_handle is null, refcount %d\n", + (int)object->refcount.counter); + kfree(object); + return; + } + handle = cqm_handle->ex_handle; + + if (cqm_qpc_mpt_delete_ret(object) == CQM_SUCCESS) { + kfree(object); + return; + } + + if (cqm_nonrdma_queue_delete_ret(object) == CQM_SUCCESS) { + kfree(object); + return; + } + + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object->object_type)); + kfree(object); +} diff --git a/drivers/scsi/huawei/hifc/hifc_cqm_object.h b/drivers/scsi/huawei/hifc/hifc_cqm_object.h new file mode 100644 index 000000000000..308166ddd534 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_cqm_object.h @@ -0,0 +1,244 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef __CQM_OBJECT_H__ +#define __CQM_OBJECT_H__ + +#define CLA_TABLE_PAGE_ORDER (0) +#define CQM_4K_PAGE_ORDER (0) + +#define CQM_CQ_DEPTH_MAX (32768) +#define CQM_CQ_DEPTH_MIN (256) +#define CQM_BAT_SIZE_FT_PF (192) + +#define CQM_WQE_WF_LINK 1 +#define CQM_WQE_WF_NORMAL 0 +#define CQM_QUEUE_LINK_MODE 0 +#define CQM_QUEUE_RING_MODE 1 +#define CQM_4K_PAGE_SIZE 4096 + +#define CQM_SUCCESS 0 +#define CQM_FAIL -1 +#define CQM_QUEUE_TOE_SRQ_LINK_MODE 2 +#define CQM_CMD_TIMEOUT 10000 /*ms*/ + +#define CQM_INDEX_INVALID ~(0U) +#define CQM_INDEX_RESERVED (0xfffff) /* reserved by cqm alloc */ + +enum cqm_bat_entry_type_e { + CQM_BAT_ENTRY_T_CFG = 0, + CQM_BAT_ENTRY_T_HASH, + CQM_BAT_ENTRY_T_QPC, + CQM_BAT_ENTRY_T_SCQC, + CQM_BAT_ENTRY_T_SRQC, + CQM_BAT_ENTRY_T_MPT, + CQM_BAT_ENTRY_T_GID, + CQM_BAT_ENTRY_T_LUN, + CQM_BAT_ENTRY_T_TASKMAP, + CQM_BAT_ENTRY_T_L3I, + CQM_BAT_ENTRY_T_CHILDC, + CQM_BAT_ENTRY_T_TIMER, + CQM_BAT_ENTRY_T_XID2CID, + CQM_BAT_ENTRY_T_REORDER, + + CQM_BAT_ENTRY_T_INVALID = 0xff, +}; + +enum cqm_cmd_type_e { + CQM_CMD_T_INVALID = 0, + CQM_CMD_T_BAT_UPDATE, + CQM_CMD_T_CLA_UPDATE, + CQM_CMD_T_BLOOMFILTER_SET, + CQM_CMD_T_BLOOMFILTER_CLEAR, + CQM_CMD_T_COMPACT_SRQ_UPDATE, + CQM_CMD_T_CLA_CACHE_INVALID, + CQM_CMD_T_BLOOMFILTER_INIT, + QM_CMD_T_MAX +}; + +/*linkwqe*/ +#define CQM_LINK_WQE_CTRLSL_VALUE 2 +#define CQM_LINK_WQE_LP_VALID 1 +#define CQM_LINK_WQE_LP_INVALID 0 +#define CQM_LINK_WQE_OWNER_VALID 1 +#define CQM_LINK_WQE_OWNER_INVALID 0 + +/*CLA update mode*/ +#define CQM_CLA_RECORD_NEW_GPA 0 +#define CQM_CLA_DEL_GPA_WITHOUT_CACHE_INVALID 1 +#define CQM_CLA_DEL_GPA_WITH_CACHE_INVALID 2 + +#define CQM_CLA_LVL_0 0 +#define CQM_CLA_LVL_1 1 +#define CQM_CLA_LVL_2 2 + +#define CQM_MAX_INDEX_BIT 19 +#define CQM_CHIP_CACHELINE 256 +enum cqm_cmd_ack_type_e { + CQM_CMD_ACK_TYPE_CMDQ = 0, /* ack: write back to cmdq */ + CQM_CMD_ACK_TYPE_SHARE_CQN = 1, /* ack report scq by root ctx ctx */ + CQM_CMD_ACK_TYPE_APP_CQN = 2 /* ack report scq by parent ctx */ +}; + +struct cqm_bat_entry_cfg_s { + u32 cur_conn_num_h_4 :4; + u32 rsv1 :4; + u32 max_conn_num :20; + u32 rsv2 :4; 
+ + u32 max_conn_cache :10; + u32 rsv3 :6; + u32 cur_conn_num_l_16 :16; + + u32 bloom_filter_addr :16; + u32 cur_conn_cache :10; + u32 rsv4 :6; + + u32 bucket_num :16; + u32 bloom_filter_len :16; +}; + +#define CQM_BAT_NO_BYPASS_CACHE 0 +#define CQM_BAT_ENTRY_SIZE_256 0 +#define CQM_BAT_ENTRY_SIZE_512 1 +#define CQM_BAT_ENTRY_SIZE_1024 2 + +struct cqm_bat_entry_standerd_s { + u32 entry_size :2; + u32 rsv1 :6; + u32 max_number :20; + u32 rsv2 :4; + + u32 cla_gpa_h :32; + + u32 cla_gpa_l :32; + + u32 rsv3 :8; + u32 z :5; + u32 y :5; + u32 x :5; + u32 rsv24 :1; + u32 bypass :1; + u32 cla_level :2; + u32 rsv5 :5; +}; + +struct cqm_bat_entry_taskmap_s { + u32 gpa0_h; + u32 gpa0_l; + + u32 gpa1_h; + u32 gpa1_l; + + u32 gpa2_h; + u32 gpa2_l; + + u32 gpa3_h; + u32 gpa3_l; +}; + +struct cqm_cla_cache_invalid_cmd_s { + u32 gpa_h; + u32 gpa_l; + u32 cache_size;/* CLA cache size=4096B */ +}; + +struct cqm_cla_update_cmd_s { + /* need to update gpa addr */ + u32 gpa_h; + u32 gpa_l; + + /* update value */ + u32 value_h; + u32 value_l; +}; + +struct cqm_bat_update_cmd_s { +#define CQM_BAT_MAX_SIZE 256 + u32 offset; /* byte offset,16Byte aligned */ + u32 byte_len; /* max size: 256byte */ + u8 data[CQM_BAT_MAX_SIZE]; +}; + +struct cqm_handle_s; + +struct cqm_linkwqe_s { + u32 rsv1 :14; + u32 wf :1; + u32 rsv2 :14; + u32 ctrlsl :2; + u32 o :1; + + u32 rsv3 :31; + u32 lp :1; + + u32 next_page_gpa_h; + u32 next_page_gpa_l; + + u32 next_buffer_addr_h; + u32 next_buffer_addr_l; +}; + +struct cqm_srq_linkwqe_s { + struct cqm_linkwqe_s linkwqe; + /*add by wss for srq*/ + u32 current_buffer_gpa_h; + u32 current_buffer_gpa_l; + u32 current_buffer_addr_h; + u32 current_buffer_addr_l; + + u32 fast_link_page_addr_h; + u32 fast_link_page_addr_l; + + u32 fixed_next_buffer_addr_h; + u32 fixed_next_buffer_addr_l; +}; + +union cqm_linkwqe_first_64b_s { + struct cqm_linkwqe_s basic_linkwqe; + u32 value[16]; +}; + +struct cqm_linkwqe_second_64b_s { + u32 rsvd0[4]; + u32 rsvd1[4]; + union { + struct { + u32 rsvd0[3]; + u32 rsvd1 :29; + u32 toe_o :1; + u32 resvd2 :2; + } bs; + u32 value[4]; + } third_16B; + + union { + struct { + u32 rsvd0[2]; + u32 rsvd1 :31; + u32 ifoe_o :1; + u32 rsvd2; + } bs; + u32 value[4]; + } forth_16B; + +}; + +struct cqm_linkwqe_128b_s { + union cqm_linkwqe_first_64b_s first_64b; + struct cqm_linkwqe_second_64b_s second_64b; +}; + +s32 cqm_bat_init(struct cqm_handle_s *cqm_handle); +void cqm_bat_uninit(struct cqm_handle_s *cqm_handle); +s32 cqm_cla_init(struct cqm_handle_s *cqm_handle); +void cqm_cla_uninit(struct cqm_handle_s *cqm_handle); +s32 cqm_bitmap_init(struct cqm_handle_s *cqm_handle); +void cqm_bitmap_uninit(struct cqm_handle_s *cqm_handle); +s32 cqm_object_table_init(struct cqm_handle_s *cqm_handle); +void cqm_object_table_uninit(struct cqm_handle_s *cqm_handle); + +#endif /* __CQM_OBJECT_H__ */ diff --git a/drivers/scsi/huawei/hifc/hifc_eqs.c b/drivers/scsi/huawei/hifc/hifc_eqs.c new file mode 100644 index 000000000000..803866e1fbf9 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_eqs.c @@ -0,0 +1,1347 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/workqueue.h> +#include <linux/pci.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/module.h> + +#include "hifc_knl_adp.h" +#include 
"hifc_hw.h" +#include "hifc_hwif.h" +#include "hifc_api_cmd.h" +#include "hifc_mgmt.h" +#include "hifc_hwdev.h" +#include "hifc_eqs.h" + +#define HIFC_EQS_WQ_NAME "hifc_eqs" + +#define AEQ_CTRL_0_INTR_IDX_SHIFT 0 +#define AEQ_CTRL_0_FUNC_BUSY_SHIFT 10 +#define AEQ_CTRL_0_DMA_ATTR_SHIFT 12 +#define AEQ_CTRL_0_PCI_INTF_IDX_SHIFT 20 +#define AEQ_CTRL_0_QPS_NUM_SHIFT 22 +#define AEQ_CTRL_0_INTR_MODE_SHIFT 31 + +#define AEQ_CTRL_0_INTR_IDX_MASK 0x3FFU +#define AEQ_CTRL_0_FUNC_BUSY_MASK 0x1U +#define AEQ_CTRL_0_DMA_ATTR_MASK 0x3FU +#define AEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3U +#define AEQ_CTRL_0_QPS_NUM_MASK 0xFFU +#define AEQ_CTRL_0_INTR_MODE_MASK 0x1U + +#define AEQ_CTRL_0_GET(val, member) \ + (((val) >> AEQ_CTRL_0_##member##_SHIFT) & \ + AEQ_CTRL_0_##member##_MASK) + +#define AEQ_CTRL_0_SET(val, member) \ + (((val) & AEQ_CTRL_0_##member##_MASK) << \ + AEQ_CTRL_0_##member##_SHIFT) + +#define AEQ_CTRL_0_CLEAR(val, member) \ + ((val) & (~(AEQ_CTRL_0_##member##_MASK \ + << AEQ_CTRL_0_##member##_SHIFT))) + +#define AEQ_CTRL_1_LEN_SHIFT 0 +#define AEQ_CTRL_1_FUNC_OWN_SHIFT 21 +#define AEQ_CTRL_1_ELEM_SIZE_SHIFT 24 +#define AEQ_CTRL_1_PAGE_SIZE_SHIFT 28 + +#define AEQ_CTRL_1_LEN_MASK 0x1FFFFFU +#define AEQ_CTRL_1_FUNC_OWN_MASK 0x1U +#define AEQ_CTRL_1_ELEM_SIZE_MASK 0x3U +#define AEQ_CTRL_1_PAGE_SIZE_MASK 0xFU + +#define AEQ_CTRL_1_GET(val, member) \ + (((val) >> AEQ_CTRL_1_##member##_SHIFT) & \ + AEQ_CTRL_1_##member##_MASK) + +#define AEQ_CTRL_1_SET(val, member) \ + (((val) & AEQ_CTRL_1_##member##_MASK) << \ + AEQ_CTRL_1_##member##_SHIFT) + +#define AEQ_CTRL_1_CLEAR(val, member) \ + ((val) & (~(AEQ_CTRL_1_##member##_MASK \ + << AEQ_CTRL_1_##member##_SHIFT))) + +#define HIFC_EQ_PROD_IDX_MASK 0xFFFFF +#define HIFC_TASK_PROCESS_EQE_LIMIT 1024 +#define HIFC_EQ_UPDATE_CI_STEP 64 + +static uint g_aeq_len = HIFC_DEFAULT_AEQ_LEN; +module_param(g_aeq_len, uint, 0444); +MODULE_PARM_DESC(g_aeq_len, + "aeq depth, valid range is " __stringify(HIFC_MIN_AEQ_LEN) + " - " __stringify(HIFC_MAX_AEQ_LEN)); + +static uint g_ceq_len = HIFC_DEFAULT_CEQ_LEN; +module_param(g_ceq_len, uint, 0444); +MODULE_PARM_DESC(g_ceq_len, + "ceq depth, valid range is " __stringify(HIFC_MIN_CEQ_LEN) + " - " __stringify(HIFC_MAX_CEQ_LEN)); + +static uint g_num_ceqe_in_tasklet = HIFC_TASK_PROCESS_EQE_LIMIT; +module_param(g_num_ceqe_in_tasklet, uint, 0444); +MODULE_PARM_DESC(g_num_ceqe_in_tasklet, + "The max number of ceqe can be processed in tasklet, default = 1024"); + +#define CEQ_CTRL_0_INTR_IDX_SHIFT 0 +#define CEQ_CTRL_0_DMA_ATTR_SHIFT 12 +#define CEQ_CTRL_0_LIMIT_KICK_SHIFT 20 +#define CEQ_CTRL_0_PCI_INTF_IDX_SHIFT 24 +#define CEQ_CTRL_0_INTR_MODE_SHIFT 31 + +#define CEQ_CTRL_0_INTR_IDX_MASK 0x3FFU +#define CEQ_CTRL_0_DMA_ATTR_MASK 0x3FU +#define CEQ_CTRL_0_LIMIT_KICK_MASK 0xFU +#define CEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3U +#define CEQ_CTRL_0_INTR_MODE_MASK 0x1U + +#define CEQ_CTRL_0_SET(val, member) \ + (((val) & CEQ_CTRL_0_##member##_MASK) << \ + CEQ_CTRL_0_##member##_SHIFT) + +#define CEQ_CTRL_1_LEN_SHIFT 0 +#define CEQ_CTRL_1_PAGE_SIZE_SHIFT 28 +#define CEQ_CTRL_1_LEN_MASK 0x1FFFFFU +#define CEQ_CTRL_1_PAGE_SIZE_MASK 0xFU + +#define CEQ_CTRL_1_SET(val, member) \ + (((val) & CEQ_CTRL_1_##member##_MASK) << \ + CEQ_CTRL_1_##member##_SHIFT) + +#define EQ_ELEM_DESC_TYPE_SHIFT 0 +#define EQ_ELEM_DESC_SRC_SHIFT 7 +#define EQ_ELEM_DESC_SIZE_SHIFT 8 +#define EQ_ELEM_DESC_WRAPPED_SHIFT 31 +#define EQ_ELEM_DESC_TYPE_MASK 0x7FU +#define EQ_ELEM_DESC_SRC_MASK 0x1U +#define EQ_ELEM_DESC_SIZE_MASK 0xFFU +#define EQ_ELEM_DESC_WRAPPED_MASK 0x1U + 
+#define EQ_ELEM_DESC_GET(val, member) \ + (((val) >> EQ_ELEM_DESC_##member##_SHIFT) & \ + EQ_ELEM_DESC_##member##_MASK) + +#define EQ_CONS_IDX_CONS_IDX_SHIFT 0 +#define EQ_CONS_IDX_XOR_CHKSUM_SHIFT 24 +#define EQ_CONS_IDX_INT_ARMED_SHIFT 31 +#define EQ_CONS_IDX_CONS_IDX_MASK 0x1FFFFFU +#define EQ_CONS_IDX_XOR_CHKSUM_MASK 0xFU +#define EQ_CONS_IDX_INT_ARMED_MASK 0x1U + +#define EQ_CONS_IDX_SET(val, member) \ + (((val) & EQ_CONS_IDX_##member##_MASK) << \ + EQ_CONS_IDX_##member##_SHIFT) + +#define EQ_CONS_IDX_CLEAR(val, member) \ + ((val) & (~(EQ_CONS_IDX_##member##_MASK \ + << EQ_CONS_IDX_##member##_SHIFT))) + +#define EQ_WRAPPED(eq) ((u32)(eq)->wrapped << EQ_VALID_SHIFT) + +#define EQ_CONS_IDX(eq) ((eq)->cons_idx | \ + ((u32)(eq)->wrapped << EQ_WRAPPED_SHIFT)) + +#define EQ_CONS_IDX_REG_ADDR(eq) (((eq)->type == HIFC_AEQ) ? \ + HIFC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) : \ + HIFC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id)) + +#define EQ_PROD_IDX_REG_ADDR(eq) (((eq)->type == HIFC_AEQ) ? \ + HIFC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) : \ + HIFC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id)) + +#define GET_EQ_NUM_PAGES(eq, size) \ + ((u16)(ALIGN((u32)((eq)->eq_len * (eq)->elem_size), \ + (size)) / (size))) + +#define GET_EQ_NUM_ELEMS(eq, pg_size) ((pg_size) / (u32)(eq)->elem_size) + +#define GET_EQ_ELEMENT(eq, idx) \ + (((u8 *)(eq)->virt_addr[(idx) / (eq)->num_elem_in_pg]) + \ + (u32)(((idx) & ((eq)->num_elem_in_pg - 1)) * (eq)->elem_size)) + +#define GET_AEQ_ELEM(eq, idx) ((struct hifc_aeq_elem *)\ + GET_EQ_ELEMENT((eq), (idx))) + +#define GET_CEQ_ELEM(eq, idx) ((u32 *)GET_EQ_ELEMENT((eq), (idx))) + +#define GET_CURR_AEQ_ELEM(eq) GET_AEQ_ELEM((eq), (eq)->cons_idx) + +#define GET_CURR_CEQ_ELEM(eq) GET_CEQ_ELEM((eq), (eq)->cons_idx) + +#define PAGE_IN_4K(page_size) ((page_size) >> 12) +#define EQ_SET_HW_PAGE_SIZE_VAL(eq) \ + ((u32)ilog2(PAGE_IN_4K((eq)->page_size))) + +#define ELEMENT_SIZE_IN_32B(eq) (((eq)->elem_size) >> 5) +#define EQ_SET_HW_ELEM_SIZE_VAL(eq) ((u32)ilog2(ELEMENT_SIZE_IN_32B(eq))) + +#define AEQ_DMA_ATTR_DEFAULT 0 +#define CEQ_DMA_ATTR_DEFAULT 0 +#define CEQ_LMT_KICK_DEFAULT 0 +#define EQ_MSIX_RESEND_TIMER_CLEAR 1 +#define EQ_WRAPPED_SHIFT 20 +#define EQ_VALID_SHIFT 31 +#define CEQE_TYPE_SHIFT 23 +#define CEQE_TYPE_MASK 0x7 + +#define CEQE_TYPE(type) (((type) >> CEQE_TYPE_SHIFT) & \ + CEQE_TYPE_MASK) +#define CEQE_DATA_MASK 0x3FFFFFF +#define CEQE_DATA(data) ((data) & CEQE_DATA_MASK) +#define EQ_MIN_PAGE_SIZE 0x1000U +#define aeq_to_aeqs(eq) \ + container_of((eq) - (eq)->q_id, struct hifc_aeqs, aeq[0]) + +#define ceq_to_ceqs(eq) \ + container_of((eq) - (eq)->q_id, struct hifc_ceqs, ceq[0]) + +/** + * aeq_interrupt - aeq interrupt handler + * @irq: irq number + * @data: the async event queue of the event + **/ +static irqreturn_t aeq_interrupt(int irq, void *data) +{ + struct hifc_eq *aeq = (struct hifc_eq *)data; + struct hifc_hwdev *hwdev = aeq->hwdev; + + struct hifc_aeqs *aeqs = aeq_to_aeqs(aeq); + struct workqueue_struct *workq = aeqs->workq; + struct hifc_eq_work *aeq_work; + + /* clear resend timer cnt register */ + hifc_misx_intr_clear_resend_bit(hwdev, aeq->eq_irq.msix_entry_idx, + EQ_MSIX_RESEND_TIMER_CLEAR); + + aeq_work = &aeq->aeq_work; + aeq_work->data = aeq; + + queue_work(workq, &aeq_work->work); + + return IRQ_HANDLED; +} + +/** + * ceq_interrupt - ceq interrupt handler + * @irq: irq number + * @data: the completion event queue of the event + **/ +static irqreturn_t ceq_interrupt(int irq, void *data) +{ + struct hifc_eq *ceq = (struct hifc_eq *)data; + struct hifc_ceq_tasklet_data 
*ceq_tasklet_data; + + ceq->hard_intr_jif = jiffies; + + /* clear resend timer counters */ + hifc_misx_intr_clear_resend_bit(ceq->hwdev, ceq->eq_irq.msix_entry_idx, + EQ_MSIX_RESEND_TIMER_CLEAR); + + ceq_tasklet_data = &ceq->ceq_tasklet_data; + ceq_tasklet_data->data = data; + tasklet_schedule(&ceq->ceq_tasklet); + + return IRQ_HANDLED; +} + +static u8 eq_cons_idx_checksum_set(u32 val) +{ + u8 checksum = 0; + u8 idx; + + for (idx = 0; idx < 32; idx += 4) + checksum ^= ((val >> idx) & 0xF); + + return checksum & 0xF; +} + +/** + * hifc_aeq_register_hw_cb - register aeq callback for specific event + * @hwdev: pointer to hw device + * @event: event for the handler + * @hw_cb: callback function + * Return: 0 - success, negative - failure + **/ +int hifc_aeq_register_hw_cb(void *hwdev, enum hifc_aeq_type event, + hifc_aeq_hwe_cb hwe_cb) +{ + struct hifc_aeqs *aeqs; + + if (!hwdev || !hwe_cb || event >= HIFC_MAX_AEQ_EVENTS) + return -EINVAL; + + aeqs = ((struct hifc_hwdev *)hwdev)->aeqs; + + aeqs->aeq_hwe_cb[event] = hwe_cb; + + set_bit(HIFC_AEQ_HW_CB_REG, &aeqs->aeq_hw_cb_state[event]); + + return 0; +} + +/** + * hifc_aeq_unregister_hw_cb - unregister the aeq callback for specific event + * @hwdev: pointer to hw device + * @event: event for the handler + **/ +void hifc_aeq_unregister_hw_cb(void *hwdev, enum hifc_aeq_type event) +{ + struct hifc_aeqs *aeqs; + + if (!hwdev || event >= HIFC_MAX_AEQ_EVENTS) + return; + + aeqs = ((struct hifc_hwdev *)hwdev)->aeqs; + + clear_bit(HIFC_AEQ_HW_CB_REG, &aeqs->aeq_hw_cb_state[event]); + + while (test_bit(HIFC_AEQ_HW_CB_RUNNING, &aeqs->aeq_hw_cb_state[event])) + usleep_range(900, 1000); + + aeqs->aeq_hwe_cb[event] = NULL; +} + +/** + * hifc_aeq_register_sw_cb - register aeq callback for sw event + * @hwdev: pointer to hw device + * @event: soft event for the handler + * @sw_cb: callback function + * Return: 0 - success, negative - failure + **/ +int hifc_aeq_register_swe_cb(void *hwdev, enum hifc_aeq_sw_type event, + hifc_aeq_swe_cb aeq_swe_cb) +{ + struct hifc_aeqs *aeqs; + + if (!hwdev || !aeq_swe_cb || event >= HIFC_MAX_AEQ_SW_EVENTS) + return -EINVAL; + + aeqs = ((struct hifc_hwdev *)hwdev)->aeqs; + + aeqs->aeq_swe_cb[event] = aeq_swe_cb; + + set_bit(HIFC_AEQ_SW_CB_REG, &aeqs->aeq_sw_cb_state[event]); + + return 0; +} + +/** + * hifc_aeq_unregister_sw_cb - unregister the aeq callback for sw event + * @hwdev: pointer to hw device + * @event: soft event for the handler + **/ +void hifc_aeq_unregister_swe_cb(void *hwdev, enum hifc_aeq_sw_type event) +{ + struct hifc_aeqs *aeqs; + + if (!hwdev || event >= HIFC_MAX_AEQ_SW_EVENTS) + return; + + aeqs = ((struct hifc_hwdev *)hwdev)->aeqs; + + clear_bit(HIFC_AEQ_SW_CB_REG, &aeqs->aeq_sw_cb_state[event]); + + while (test_bit(HIFC_AEQ_SW_CB_RUNNING, &aeqs->aeq_sw_cb_state[event])) + usleep_range(900, 1000); + + aeqs->aeq_swe_cb[event] = NULL; +} + +/** + * hifc_ceq_register_sw_cb - register ceq callback for specific event + * @hwdev: pointer to hw device + * @event: event for the handler + * @callback: callback function + * Return: 0 - success, negative - failure + **/ +int hifc_ceq_register_cb(void *hwdev, enum hifc_ceq_event event, + hifc_ceq_event_cb callback) +{ + struct hifc_ceqs *ceqs; + + if (!hwdev || event >= HIFC_MAX_CEQ_EVENTS) + return -EINVAL; + + ceqs = ((struct hifc_hwdev *)hwdev)->ceqs; + + ceqs->ceq_cb[event] = callback; + + set_bit(HIFC_CEQ_CB_REG, &ceqs->ceq_cb_state[event]); + + return 0; +} + +/** + * hifc_ceq_unregister_cb - unregister ceq callback for specific event + * @hwdev: pointer to 
hw device
+ * @event: event for the handler
+ **/
+void hifc_ceq_unregister_cb(void *hwdev, enum hifc_ceq_event event)
+{
+	struct hifc_ceqs *ceqs;
+
+	if (!hwdev || event >= HIFC_MAX_CEQ_EVENTS)
+		return;
+
+	ceqs = ((struct hifc_hwdev *)hwdev)->ceqs;
+
+	clear_bit(HIFC_CEQ_CB_REG, &ceqs->ceq_cb_state[event]);
+
+	while (test_bit(HIFC_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event]))
+		usleep_range(900, 1000);
+
+	ceqs->ceq_cb[event] = NULL;
+}
+
+/**
+ * set_eq_cons_idx - write the cons idx to the hw
+ * @eq: The event queue to update the cons idx for
+ * @arm_state: arm state value
+ **/
+static void set_eq_cons_idx(struct hifc_eq *eq, u32 arm_state)
+{
+	u32 eq_wrap_ci, val;
+	u32 addr = EQ_CONS_IDX_REG_ADDR(eq);
+
+	eq_wrap_ci = EQ_CONS_IDX(eq);
+
+	/* other fields are reserved, set to 0 */
+	val = EQ_CONS_IDX_SET(eq_wrap_ci, CONS_IDX) |
+	      EQ_CONS_IDX_SET(arm_state, INT_ARMED);
+
+	val |= EQ_CONS_IDX_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM);
+
+	hifc_hwif_write_reg(eq->hwdev->hwif, addr, val);
+}
+
+/**
+ * ceq_event_handler - handler for the ceq events
+ * @ceqs: ceqs part of the chip
+ * @ceqe: ceq element of the event
+ **/
+static void ceq_event_handler(struct hifc_ceqs *ceqs, u32 ceqe)
+{
+	struct hifc_hwdev *hwdev = ceqs->hwdev;
+	enum hifc_ceq_event event = CEQE_TYPE(ceqe);
+	u32 ceqe_data = CEQE_DATA(ceqe);
+
+	if (event >= HIFC_MAX_CEQ_EVENTS) {
+		sdk_err(hwdev->dev_hdl, "Ceq unknown event:%d, ceqe data: 0x%x\n",
+			event, ceqe_data);
+		return;
+	}
+
+	set_bit(HIFC_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event]);
+
+	if (ceqs->ceq_cb[event] &&
+	    test_bit(HIFC_CEQ_CB_REG, &ceqs->ceq_cb_state[event]))
+		ceqs->ceq_cb[event](hwdev, ceqe_data);
+
+	clear_bit(HIFC_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event]);
+}
+
+static void aeq_swe_handler(struct hifc_aeqs *aeqs,
+			    struct hifc_aeq_elem *aeqe_pos,
+			    enum hifc_aeq_type event)
+{
+	enum hifc_ucode_event_type ucode_event;
+	enum hifc_aeq_sw_type sw_event;
+	u64 aeqe_data;
+	u8 lev;
+
+	ucode_event = event;
+	/* SW event uses only the first 8B */
+	sw_event = ucode_event >= HIFC_NIC_FATAL_ERROR_MAX ?
+ HIFC_STATEFULL_EVENT : + HIFC_STATELESS_EVENT; + aeqe_data = be64_to_cpu((*(u64 *)aeqe_pos->aeqe_data)); + set_bit(HIFC_AEQ_SW_CB_RUNNING, + &aeqs->aeq_sw_cb_state[sw_event]); + if (aeqs->aeq_swe_cb[sw_event] && + test_bit(HIFC_AEQ_SW_CB_REG, + &aeqs->aeq_sw_cb_state[sw_event])) { + lev = aeqs->aeq_swe_cb[sw_event](aeqs->hwdev, + ucode_event, + aeqe_data); + hifc_swe_fault_handler(aeqs->hwdev, lev, + ucode_event, aeqe_data); + } + clear_bit(HIFC_AEQ_SW_CB_RUNNING, + &aeqs->aeq_sw_cb_state[sw_event]); +} + +static void aeq_hwe_handler(struct hifc_aeqs *aeqs, + struct hifc_aeq_elem *aeqe_pos, + enum hifc_aeq_type event, u32 aeqe_desc) +{ + u8 size; + + if (event < HIFC_MAX_AEQ_EVENTS) { + size = EQ_ELEM_DESC_GET(aeqe_desc, SIZE); + set_bit(HIFC_AEQ_HW_CB_RUNNING, + &aeqs->aeq_hw_cb_state[event]); + if (aeqs->aeq_hwe_cb[event] && + test_bit(HIFC_AEQ_HW_CB_REG, + &aeqs->aeq_hw_cb_state[event])) + aeqs->aeq_hwe_cb[event](aeqs->hwdev, + aeqe_pos->aeqe_data, size); + clear_bit(HIFC_AEQ_HW_CB_RUNNING, + &aeqs->aeq_hw_cb_state[event]); + + return; + } + + sdk_warn(aeqs->hwdev->dev_hdl, "Unknown aeq hw event %d\n", event); +} + +/** + * aeq_irq_handler - handler for the aeq event + * @eq: the async event queue of the event + * Return: true - success, false - failure + **/ +static bool aeq_irq_handler(struct hifc_eq *eq) +{ + struct hifc_aeqs *aeqs = aeq_to_aeqs(eq); + struct hifc_aeq_elem *aeqe_pos; + enum hifc_aeq_type event; + u32 aeqe_desc; + u32 i, eqe_cnt = 0; + + for (i = 0; i < HIFC_TASK_PROCESS_EQE_LIMIT; i++) { + aeqe_pos = GET_CURR_AEQ_ELEM(eq); + + /* Data in HW is in Big endian Format */ + aeqe_desc = be32_to_cpu(aeqe_pos->desc); + + /* HW updates wrapped bit, when it adds eq element event */ + if (EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped) + return false; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the cmdq wqe until we have + * verified the command has been processed and + * written back. 
+ */ + dma_rmb(); + + event = EQ_ELEM_DESC_GET(aeqe_desc, TYPE); + if (EQ_ELEM_DESC_GET(aeqe_desc, SRC)) + aeq_swe_handler(aeqs, aeqe_pos, event); + else + aeq_hwe_handler(aeqs, aeqe_pos, event, aeqe_desc); + + eq->cons_idx++; + + if (eq->cons_idx == eq->eq_len) { + eq->cons_idx = 0; + eq->wrapped = !eq->wrapped; + } + + if (++eqe_cnt >= HIFC_EQ_UPDATE_CI_STEP) { + eqe_cnt = 0; + set_eq_cons_idx(eq, HIFC_EQ_NOT_ARMED); + } + } + + return true; +} + +/** + * ceq_irq_handler - handler for the ceq event + * @eq: the completion event queue of the event + * Return: true - success, false - failure + **/ +static bool ceq_irq_handler(struct hifc_eq *eq) +{ + struct hifc_ceqs *ceqs = ceq_to_ceqs(eq); + u32 ceqe, eqe_cnt = 0; + u32 i; + + for (i = 0; i < g_num_ceqe_in_tasklet; i++) { + ceqe = *(GET_CURR_CEQ_ELEM(eq)); + ceqe = be32_to_cpu(ceqe); + + /* HW updates wrapped bit, when it adds eq element event */ + if (EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped) + return false; + + ceq_event_handler(ceqs, ceqe); + + eq->cons_idx++; + + if (eq->cons_idx == eq->eq_len) { + eq->cons_idx = 0; + eq->wrapped = !eq->wrapped; + } + + if (++eqe_cnt >= HIFC_EQ_UPDATE_CI_STEP) { + eqe_cnt = 0; + set_eq_cons_idx(eq, HIFC_EQ_NOT_ARMED); + } + } + + return true; +} + +/** + * eq_irq_handler - handler for the eq event + * @data: the event queue of the event + * Return: true - success, false - failure + **/ +static bool eq_irq_handler(void *data) +{ + struct hifc_eq *eq = (struct hifc_eq *)data; + bool uncompleted; + + if (eq->type == HIFC_AEQ) + uncompleted = aeq_irq_handler(eq); + else + uncompleted = ceq_irq_handler(eq); + + set_eq_cons_idx(eq, uncompleted ? HIFC_EQ_NOT_ARMED : HIFC_EQ_ARMED); + + return uncompleted; +} + +static void reschedule_eq_handler(struct hifc_eq *eq) +{ + if (eq->type == HIFC_AEQ) { + struct hifc_aeqs *aeqs = aeq_to_aeqs(eq); + struct workqueue_struct *workq = aeqs->workq; + struct hifc_eq_work *aeq_work = &eq->aeq_work; + + queue_work(workq, &aeq_work->work); + } else { + tasklet_schedule(&eq->ceq_tasklet); + } +} + +/** + * ceq_tasklet - ceq tasklet for the event + * @ceq_data: data that will be used by the tasklet(ceq) + **/ + +static void ceq_tasklet(ulong ceq_data) +{ + struct hifc_ceq_tasklet_data *ceq_tasklet_data = + (struct hifc_ceq_tasklet_data *)ceq_data; + struct hifc_eq *eq = (struct hifc_eq *)ceq_tasklet_data->data; + + eq->soft_intr_jif = jiffies; + + if (eq_irq_handler(ceq_tasklet_data->data)) + reschedule_eq_handler(ceq_tasklet_data->data); +} + +/** + * eq_irq_work - eq work for the event + * @work: the work that is associated with the eq + **/ +static void eq_irq_work(struct work_struct *work) +{ + struct hifc_eq_work *aeq_work = + container_of(work, struct hifc_eq_work, work); + + if (eq_irq_handler(aeq_work->data)) + reschedule_eq_handler(aeq_work->data); +} + +struct hifc_ceq_ctrl_reg { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 q_id; + u32 ctrl0; + u32 ctrl1; +}; + +static int set_ceq_ctrl_reg(struct hifc_hwdev *hwdev, u16 q_id, + u32 ctrl0, u32 ctrl1) +{ + struct hifc_ceq_ctrl_reg ceq_ctrl = {0}; + u16 in_size = sizeof(ceq_ctrl); + u16 out_size = sizeof(ceq_ctrl); + int err; + + err = hifc_global_func_id_get(hwdev, &ceq_ctrl.func_id); + if (err) + return err; + + ceq_ctrl.q_id = q_id; + ceq_ctrl.ctrl0 = ctrl0; + ceq_ctrl.ctrl1 = ctrl1; + + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_CEQ_CTRL_REG_WR_BY_UP, + &ceq_ctrl, in_size, + &ceq_ctrl, &out_size, 0); + if (err || !out_size || ceq_ctrl.status) { + 
sdk_err(hwdev->dev_hdl, "Failed to set ceq %d ctrl reg, err: %d status: 0x%x, out_size: 0x%x\n", + q_id, err, ceq_ctrl.status, out_size); + return -EFAULT; + } + + return 0; +} + +/** + * set_eq_ctrls - setting eq's ctrls registers + * @eq: the event queue for setting + * Return: 0 - success, negative - failure + **/ +static int set_eq_ctrls(struct hifc_eq *eq) +{ + enum hifc_eq_type type = eq->type; + struct hifc_hwif *hwif = eq->hwdev->hwif; + struct irq_info *eq_irq = &eq->eq_irq; + u32 addr, val, ctrl0, ctrl1, page_size_val, elem_size; + u32 pci_intf_idx = HIFC_PCI_INTF_IDX(hwif); + int err; + + if (type == HIFC_AEQ) { + /* set ctrl0 */ + addr = HIFC_CSR_AEQ_CTRL_0_ADDR(eq->q_id); + + val = hifc_hwif_read_reg(hwif, addr); + + val = AEQ_CTRL_0_CLEAR(val, INTR_IDX) & + AEQ_CTRL_0_CLEAR(val, DMA_ATTR) & + AEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) & + AEQ_CTRL_0_CLEAR(val, INTR_MODE); + + ctrl0 = AEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) | + AEQ_CTRL_0_SET(AEQ_DMA_ATTR_DEFAULT, DMA_ATTR) | + AEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) | + + AEQ_CTRL_0_SET(HIFC_INTR_MODE_ARMED, INTR_MODE); + + val |= ctrl0; + + hifc_hwif_write_reg(hwif, addr, val); + + /* set ctrl1 */ + addr = HIFC_CSR_AEQ_CTRL_1_ADDR(eq->q_id); + + page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq); + elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq); + + ctrl1 = AEQ_CTRL_1_SET(eq->eq_len, LEN) | + AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) | + AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE); + + hifc_hwif_write_reg(hwif, addr, ctrl1); + + } else { + ctrl0 = CEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) | + CEQ_CTRL_0_SET(CEQ_DMA_ATTR_DEFAULT, DMA_ATTR) | + CEQ_CTRL_0_SET(CEQ_LMT_KICK_DEFAULT, LIMIT_KICK) | + CEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) | + CEQ_CTRL_0_SET(HIFC_INTR_MODE_ARMED, INTR_MODE); + + page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq); + + ctrl1 = CEQ_CTRL_1_SET(eq->eq_len, LEN) | + CEQ_CTRL_1_SET(page_size_val, PAGE_SIZE); + + /* set ceq ctrl reg through mgmt cpu */ + err = set_ceq_ctrl_reg(eq->hwdev, eq->q_id, ctrl0, ctrl1); + if (err) + return err; + } + + return 0; +} + +/** + * ceq_elements_init - Initialize all the elements in the ceq + * @eq: the event queue + * @init_val: value to init with it the elements + **/ +static void ceq_elements_init(struct hifc_eq *eq, u32 init_val) +{ + u32 i; + u32 *ceqe; + + for (i = 0; i < eq->eq_len; i++) { + ceqe = GET_CEQ_ELEM(eq, i); + *(ceqe) = cpu_to_be32(init_val); + } + + wmb(); /* Write the init values */ +} + +/** + * aeq_elements_init - initialize all the elements in the aeq + * @eq: the event queue + * @init_val: value to init with it the elements + **/ +static void aeq_elements_init(struct hifc_eq *eq, u32 init_val) +{ + struct hifc_aeq_elem *aeqe; + u32 i; + + for (i = 0; i < eq->eq_len; i++) { + aeqe = GET_AEQ_ELEM(eq, i); + aeqe->desc = cpu_to_be32(init_val); + } + + wmb(); /* Write the init values */ +} + +static void free_eq_pages_desc(struct hifc_eq *eq) +{ + kfree(eq->virt_addr_for_free); + kfree(eq->dma_addr_for_free); + kfree(eq->virt_addr); + kfree(eq->dma_addr); +} + +static int alloc_eq_pages_desc(struct hifc_eq *eq) +{ + u64 dma_addr_size, virt_addr_size; + int err; + + dma_addr_size = eq->num_pages * sizeof(*eq->dma_addr); + virt_addr_size = eq->num_pages * sizeof(*eq->virt_addr); + + eq->dma_addr = kzalloc(dma_addr_size, GFP_KERNEL); + if (!eq->dma_addr) + return -ENOMEM; + + eq->virt_addr = kzalloc(virt_addr_size, GFP_KERNEL); + if (!eq->virt_addr) { + err = -ENOMEM; + goto virt_addr_alloc_err; + } + + eq->dma_addr_for_free = kzalloc(dma_addr_size, GFP_KERNEL); + if 
(!eq->dma_addr_for_free) { + err = -ENOMEM; + goto dma_addr_free_alloc_err; + } + + eq->virt_addr_for_free = kzalloc(virt_addr_size, GFP_KERNEL); + if (!eq->virt_addr_for_free) { + err = -ENOMEM; + goto virt_addr_free_alloc_err; + } + + return 0; + +virt_addr_free_alloc_err: + kfree(eq->dma_addr_for_free); +dma_addr_free_alloc_err: + kfree(eq->virt_addr); +virt_addr_alloc_err: + kfree(eq->dma_addr); + return err; +} + +#define IS_ALIGN(x, a) (((x) & ((a) - 1)) == 0) + +static int init_eq_elements(struct hifc_eq *eq) +{ + u32 init_val; + + eq->num_elem_in_pg = GET_EQ_NUM_ELEMS(eq, eq->page_size); + if (!IS_ALIGN(eq->num_elem_in_pg, eq->num_elem_in_pg)) { + sdk_err(eq->hwdev->dev_hdl, "Number element in eq page != power of 2\n"); + return -EINVAL; + } + + init_val = EQ_WRAPPED(eq); + + if (eq->type == HIFC_AEQ) + aeq_elements_init(eq, init_val); + else + ceq_elements_init(eq, init_val); + + return 0; +} + +/** + * alloc_eq_pages - allocate the pages for the queue + * @eq: the event queue + * Return: 0 - success, negative - failure + **/ +static int alloc_eq_pages(struct hifc_eq *eq) +{ + struct hifc_hwif *hwif = eq->hwdev->hwif; + u16 pg_num, i; + u32 reg; + int err; + u8 flag = 0; + + err = alloc_eq_pages_desc(eq); + if (err) { + sdk_err(eq->hwdev->dev_hdl, "Failed to alloc eq pages description\n"); + return err; + } + + for (pg_num = 0; pg_num < eq->num_pages; pg_num++) { + eq->virt_addr_for_free[pg_num] = dma_zalloc_coherent + (eq->hwdev->dev_hdl, eq->page_size, + &eq->dma_addr_for_free[pg_num], GFP_KERNEL); + if (!eq->virt_addr_for_free[pg_num]) { + err = -ENOMEM; + goto dma_alloc_err; + } + + eq->dma_addr[pg_num] = eq->dma_addr_for_free[pg_num]; + eq->virt_addr[pg_num] = eq->virt_addr_for_free[pg_num]; + if (!IS_ALIGN(eq->dma_addr_for_free[pg_num], + eq->page_size)) { + sdk_info(eq->hwdev->dev_hdl, + "Address is not aligned to %u-bytes as hardware required\n", + eq->page_size); + sdk_info(eq->hwdev->dev_hdl, "Change eq's page size %u\n", + ((eq->page_size) >> 1)); + eq->dma_addr[pg_num] = ALIGN + (eq->dma_addr_for_free[pg_num], + (u64)((eq->page_size) >> 1)); + eq->virt_addr[pg_num] = eq->virt_addr_for_free[pg_num] + + ((u64)eq->dma_addr[pg_num] + - (u64)eq->dma_addr_for_free[pg_num]); + flag = 1; + } + reg = HIFC_EQ_HI_PHYS_ADDR_REG(eq->type, eq->q_id, pg_num); + hifc_hwif_write_reg(hwif, reg, + upper_32_bits(eq->dma_addr[pg_num])); + + reg = HIFC_EQ_LO_PHYS_ADDR_REG(eq->type, eq->q_id, pg_num); + hifc_hwif_write_reg(hwif, reg, + lower_32_bits(eq->dma_addr[pg_num])); + } + + if (flag) { + eq->page_size = eq->page_size >> 1; + eq->eq_len = eq->eq_len >> 1; + } + + err = init_eq_elements(eq); + if (err) { + sdk_err(eq->hwdev->dev_hdl, "Failed to init eq elements\n"); + goto dma_alloc_err; + } + + return 0; + +dma_alloc_err: + for (i = 0; i < pg_num; i++) + dma_free_coherent(eq->hwdev->dev_hdl, eq->page_size, + eq->virt_addr_for_free[i], + eq->dma_addr_for_free[i]); + free_eq_pages_desc(eq); + return err; +} + +/** + * free_eq_pages - free the pages of the queue + * @eq: the event queue + **/ +static void free_eq_pages(struct hifc_eq *eq) +{ + struct hifc_hwdev *hwdev = eq->hwdev; + u16 pg_num; + + for (pg_num = 0; pg_num < eq->num_pages; pg_num++) + dma_free_coherent(hwdev->dev_hdl, eq->orig_page_size, + eq->virt_addr_for_free[pg_num], + eq->dma_addr_for_free[pg_num]); + + free_eq_pages_desc(eq); +} + +static inline u32 get_page_size(struct hifc_eq *eq) +{ + u32 total_size; + u16 count, n = 0; + + total_size = ALIGN((eq->eq_len * eq->elem_size), EQ_MIN_PAGE_SIZE); + + if (total_size <= 
(HIFC_EQ_MAX_PAGES * EQ_MIN_PAGE_SIZE)) + return EQ_MIN_PAGE_SIZE; + + count = (u16)(ALIGN((total_size / HIFC_EQ_MAX_PAGES), + EQ_MIN_PAGE_SIZE) / EQ_MIN_PAGE_SIZE); + + if (!(count & (count - 1))) + return EQ_MIN_PAGE_SIZE * count; + + while (count) { + count >>= 1; + n++; + } + + return EQ_MIN_PAGE_SIZE << n; +} + +static int request_eq_irq(struct hifc_eq *eq, enum hifc_eq_type type, + struct irq_info *entry) +{ + int err = 0; + + if (type == HIFC_AEQ) { + struct hifc_eq_work *aeq_work = &eq->aeq_work; + + INIT_WORK(&aeq_work->work, eq_irq_work); + } else { + tasklet_init(&eq->ceq_tasklet, ceq_tasklet, + (ulong)(&eq->ceq_tasklet_data)); + } + + if (type == HIFC_AEQ) { + snprintf(eq->irq_name, sizeof(eq->irq_name), + "hifc_aeq%d@pci:%s", eq->q_id, + pci_name(eq->hwdev->pcidev_hdl)); + + err = request_irq(entry->irq_id, aeq_interrupt, 0UL, + eq->irq_name, eq); + } else { + snprintf(eq->irq_name, sizeof(eq->irq_name), + "hifc_ceq%d@pci:%s", eq->q_id, + pci_name(eq->hwdev->pcidev_hdl)); + + err = request_irq(entry->irq_id, ceq_interrupt, 0UL, + eq->irq_name, eq); + } + + return err; +} + +/** + * init_eq - initialize eq + * @eq: the event queue + * @hwdev: the pointer to hw device + * @q_id: Queue id number + * @q_len: the number of EQ elements + * @type: the type of the event queue, ceq or aeq + * @entry: msix entry associated with the event queue + * Return: 0 - Success, Negative - failure + **/ +static int init_eq(struct hifc_eq *eq, struct hifc_hwdev *hwdev, u16 q_id, + u32 q_len, enum hifc_eq_type type, struct irq_info *entry) +{ + int err = 0; + + eq->hwdev = hwdev; + eq->q_id = q_id; + eq->type = type; + eq->eq_len = q_len; + + /* clear eq_len to force eqe drop in hardware */ + if (eq->type == HIFC_AEQ) + hifc_hwif_write_reg(eq->hwdev->hwif, + HIFC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0); + else + set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0); + + eq->cons_idx = 0; + eq->wrapped = 0; + + eq->elem_size = (type == HIFC_AEQ) ? 
+		HIFC_AEQE_SIZE : HIFC_CEQE_SIZE;
+
+	eq->page_size = get_page_size(eq);
+	eq->orig_page_size = eq->page_size;
+	eq->num_pages = GET_EQ_NUM_PAGES(eq, eq->page_size);
+	if (eq->num_pages > HIFC_EQ_MAX_PAGES) {
+		sdk_err(hwdev->dev_hdl, "Number pages:%d too many pages for eq\n",
+			eq->num_pages);
+		return -EINVAL;
+	}
+
+	err = alloc_eq_pages(eq);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to allocate pages for eq\n");
+		return err;
+	}
+
+	eq->eq_irq.msix_entry_idx = entry->msix_entry_idx;
+	eq->eq_irq.irq_id = entry->irq_id;
+
+	err = set_eq_ctrls(eq);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to set eq ctrls\n");
+		goto init_eq_ctrls_err;
+	}
+
+	hifc_hwif_write_reg(eq->hwdev->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0);
+	set_eq_cons_idx(eq, HIFC_EQ_ARMED);
+
+	err = request_eq_irq(eq, type, entry);
+	if (err) {
+		sdk_err(hwdev->dev_hdl, "Failed to request irq for the eq, err: %d\n",
+			err);
+		goto req_irq_err;
+	}
+
+	hifc_set_msix_state(hwdev, entry->msix_entry_idx, HIFC_MSIX_ENABLE);
+
+	return 0;
+
+init_eq_ctrls_err:
+req_irq_err:
+	free_eq_pages(eq);
+	return err;
+}
+
+/**
+ * remove_eq - remove eq
+ * @eq: the event queue
+ **/
+static void remove_eq(struct hifc_eq *eq)
+{
+	struct irq_info *entry = &eq->eq_irq;
+
+	hifc_set_msix_state(eq->hwdev, entry->msix_entry_idx,
+			    HIFC_MSIX_DISABLE);
+	synchronize_irq(entry->irq_id);
+
+	free_irq(entry->irq_id, eq);
+
+	if (eq->type == HIFC_AEQ) {
+		struct hifc_eq_work *aeq_work = &eq->aeq_work;
+
+		cancel_work_sync(&aeq_work->work);
+
+		/* clear eq_len to avoid hw access host memory */
+		hifc_hwif_write_reg(eq->hwdev->hwif,
+				    HIFC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);
+	} else {
+		tasklet_kill(&eq->ceq_tasklet);
+
+		set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0);
+	}
+
+	/* update cons_idx to avoid invalid interrupt */
+	eq->cons_idx = hifc_hwif_read_reg(eq->hwdev->hwif,
+					  EQ_PROD_IDX_REG_ADDR(eq));
+	set_eq_cons_idx(eq, HIFC_EQ_NOT_ARMED);
+
+	free_eq_pages(eq);
+}
+
+/**
+ * hifc_aeqs_init - init all the aeqs
+ * @hwdev: the pointer to hw device
+ * @num_aeqs: number of AEQs
+ * @msix_entries: msix entries associated with the event queues
+ * Return: 0 - Success, Negative - failure
+ **/
+int hifc_aeqs_init(struct hifc_hwdev *hwdev, u16 num_aeqs,
+		   struct irq_info *msix_entries)
+{
+	struct hifc_aeqs *aeqs;
+	int err;
+	u16 i, q_id;
+
+	aeqs = kzalloc(sizeof(*aeqs), GFP_KERNEL);
+	if (!aeqs)
+		return -ENOMEM;
+
+	hwdev->aeqs = aeqs;
+	aeqs->hwdev = hwdev;
+	aeqs->num_aeqs = num_aeqs;
+
+	aeqs->workq = create_singlethread_workqueue(HIFC_EQS_WQ_NAME);
+	if (!aeqs->workq) {
+		sdk_err(hwdev->dev_hdl, "Failed to initialize aeq workqueue\n");
+		err = -ENOMEM;
+		goto create_work_err;
+	}
+
+	if (g_aeq_len < HIFC_MIN_AEQ_LEN || g_aeq_len > HIFC_MAX_AEQ_LEN) {
+		sdk_warn(hwdev->dev_hdl, "Module Parameter g_aeq_len value %d out of range, resetting to %d\n",
+			 g_aeq_len, HIFC_DEFAULT_AEQ_LEN);
+		g_aeq_len = HIFC_DEFAULT_AEQ_LEN;
+	}
+
+	for (q_id = 0; q_id < num_aeqs; q_id++) {
+		err = init_eq(&aeqs->aeq[q_id], hwdev, q_id, g_aeq_len,
+			      HIFC_AEQ, &msix_entries[q_id]);
+		if (err) {
+			sdk_err(hwdev->dev_hdl, "Failed to init aeq %d\n",
+				q_id);
+			goto init_aeq_err;
+		}
+	}
+
+	return 0;
+
+init_aeq_err:
+	for (i = 0; i < q_id; i++)
+		remove_eq(&aeqs->aeq[i]);
+
+	destroy_workqueue(aeqs->workq);
+
+create_work_err:
+	kfree(aeqs);
+
+	return err;
+}
+
+/**
+ * hifc_aeqs_free - free all the aeqs
+ * @hwdev: the pointer to hw device
+ **/
+void hifc_aeqs_free(struct hifc_hwdev *hwdev)
+{
+	struct hifc_aeqs *aeqs = hwdev->aeqs;
+	enum hifc_aeq_type
aeq_event = HIFC_HW_INTER_INT; + enum hifc_aeq_sw_type sw_aeq_event = HIFC_STATELESS_EVENT; + u16 q_id; + + for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) + remove_eq(&aeqs->aeq[q_id]); + + for (; sw_aeq_event < HIFC_MAX_AEQ_SW_EVENTS; sw_aeq_event++) + hifc_aeq_unregister_swe_cb(hwdev, sw_aeq_event); + + for (; aeq_event < HIFC_MAX_AEQ_EVENTS; aeq_event++) + hifc_aeq_unregister_hw_cb(hwdev, aeq_event); + + destroy_workqueue(aeqs->workq); + + kfree(aeqs); +} + +/** + * hifc_ceqs_init - init all the ceqs + * @hwdev: the pointer to hw device + * @num_ceqs: number of CEQs + * @msix_entries: msix entries associated with the event queues + * Return: 0 - Success, Negative - failure + **/ +int hifc_ceqs_init(struct hifc_hwdev *hwdev, u16 num_ceqs, + struct irq_info *msix_entries) +{ + struct hifc_ceqs *ceqs; + int err; + u16 i, q_id; + + ceqs = kzalloc(sizeof(*ceqs), GFP_KERNEL); + if (!ceqs) + return -ENOMEM; + + hwdev->ceqs = ceqs; + + ceqs->hwdev = hwdev; + ceqs->num_ceqs = num_ceqs; + + if (g_ceq_len < HIFC_MIN_CEQ_LEN || g_ceq_len > HIFC_MAX_CEQ_LEN) { + sdk_warn(hwdev->dev_hdl, "Module Parameter g_ceq_len value %d out of range, resetting to %d\n", + g_ceq_len, HIFC_DEFAULT_CEQ_LEN); + g_ceq_len = HIFC_DEFAULT_CEQ_LEN; + } + + if (!g_num_ceqe_in_tasklet) { + sdk_warn(hwdev->dev_hdl, "Module Parameter g_num_ceqe_in_tasklet can not be zero, resetting to %d\n", + HIFC_TASK_PROCESS_EQE_LIMIT); + g_num_ceqe_in_tasklet = HIFC_TASK_PROCESS_EQE_LIMIT; + } + + for (q_id = 0; q_id < num_ceqs; q_id++) { + err = init_eq(&ceqs->ceq[q_id], hwdev, q_id, g_ceq_len, + HIFC_CEQ, &msix_entries[q_id]); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init ceq %d\n", + q_id); + goto init_ceq_err; + } + } + + return 0; + +init_ceq_err: + for (i = 0; i < q_id; i++) + remove_eq(&ceqs->ceq[i]); + + kfree(ceqs); + + return err; +} + +/** + * hifc_ceqs_free - free all the ceqs + * @hwdev: the pointer to hw device + **/ +void hifc_ceqs_free(struct hifc_hwdev *hwdev) +{ + struct hifc_ceqs *ceqs = hwdev->ceqs; + enum hifc_ceq_event ceq_event = HIFC_CMDQ; + u16 q_id; + + for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) + remove_eq(&ceqs->ceq[q_id]); + + for (; ceq_event < HIFC_MAX_CEQ_EVENTS; ceq_event++) + hifc_ceq_unregister_cb(hwdev, ceq_event); + + kfree(ceqs); +} + +void hifc_get_ceq_irqs(struct hifc_hwdev *hwdev, struct irq_info *irqs, + u16 *num_irqs) +{ + struct hifc_ceqs *ceqs = hwdev->ceqs; + u16 q_id; + + for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) { + irqs[q_id].irq_id = ceqs->ceq[q_id].eq_irq.irq_id; + irqs[q_id].msix_entry_idx = + ceqs->ceq[q_id].eq_irq.msix_entry_idx; + } + + *num_irqs = ceqs->num_ceqs; +} + +void hifc_get_aeq_irqs(struct hifc_hwdev *hwdev, struct irq_info *irqs, + u16 *num_irqs) +{ + struct hifc_aeqs *aeqs = hwdev->aeqs; + u16 q_id; + + for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) { + irqs[q_id].irq_id = aeqs->aeq[q_id].eq_irq.irq_id; + irqs[q_id].msix_entry_idx = + aeqs->aeq[q_id].eq_irq.msix_entry_idx; + } + + *num_irqs = aeqs->num_aeqs; +} + +void hifc_dump_aeq_info(struct hifc_hwdev *hwdev) +{ + struct hifc_aeq_elem *aeqe_pos; + struct hifc_eq *eq; + u32 addr, ci, pi; + int q_id; + + for (q_id = 0; q_id < hwdev->aeqs->num_aeqs; q_id++) { + eq = &hwdev->aeqs->aeq[q_id]; + addr = EQ_CONS_IDX_REG_ADDR(eq); + ci = hifc_hwif_read_reg(hwdev->hwif, addr); + addr = EQ_PROD_IDX_REG_ADDR(eq); + pi = hifc_hwif_read_reg(hwdev->hwif, addr); + aeqe_pos = GET_CURR_AEQ_ELEM(eq); + sdk_err(hwdev->dev_hdl, "Aeq id: %d, ci: 0x%08x, pi: 0x%x, work_state: 0x%x, wrap: %d, desc: 0x%x\n", + q_id, ci, pi, 
work_busy(&eq->aeq_work.work), + eq->wrapped, be32_to_cpu(aeqe_pos->desc)); + } +} + diff --git a/drivers/scsi/huawei/hifc/hifc_eqs.h b/drivers/scsi/huawei/hifc/hifc_eqs.h new file mode 100644 index 000000000000..2dcfc432c8f2 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_eqs.h @@ -0,0 +1,233 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HIFC_EQS_H +#define HIFC_EQS_H + +#define HIFC_MAX_AEQS 3 +#define HIFC_MAX_CEQS 32 + +#define HIFC_EQ_MAX_PAGES 8 + +#define HIFC_AEQE_SIZE 64 +#define HIFC_CEQE_SIZE 4 + +#define HIFC_AEQE_DESC_SIZE 4 +#define HIFC_AEQE_DATA_SIZE \ + (HIFC_AEQE_SIZE - HIFC_AEQE_DESC_SIZE) + +#define HIFC_DEFAULT_AEQ_LEN 4096 +#define HIFC_DEFAULT_CEQ_LEN 8192 + +#define HIFC_MIN_AEQ_LEN 64 +#define HIFC_MAX_AEQ_LEN (512 * 1024) +#define HIFC_MIN_CEQ_LEN 64 +#define HIFC_MAX_CEQ_LEN (1024 * 1024) + +#define HIFC_CEQ_ID_CMDQ 0 +#define EQ_IRQ_NAME_LEN 64 + +/* EQ registers */ +#define HIFC_AEQ_MTT_OFF_BASE_ADDR 0x200 +#define HIFC_CEQ_MTT_OFF_BASE_ADDR 0x400 + +#define HIFC_EQ_MTT_OFF_STRIDE 0x40 + +#define HIFC_CSR_AEQ_MTT_OFF(id) \ + (HIFC_AEQ_MTT_OFF_BASE_ADDR + (id) * HIFC_EQ_MTT_OFF_STRIDE) + +#define HIFC_CSR_CEQ_MTT_OFF(id) \ + (HIFC_CEQ_MTT_OFF_BASE_ADDR + (id) * HIFC_EQ_MTT_OFF_STRIDE) + +#define HIFC_CSR_EQ_PAGE_OFF_STRIDE 8 + +#define HIFC_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \ + (HIFC_CSR_AEQ_MTT_OFF(q_id) + \ + (pg_num) * HIFC_CSR_EQ_PAGE_OFF_STRIDE) + +#define HIFC_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \ + (HIFC_CSR_AEQ_MTT_OFF(q_id) + \ + (pg_num) * HIFC_CSR_EQ_PAGE_OFF_STRIDE + 4) + +#define HIFC_CEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \ + (HIFC_CSR_CEQ_MTT_OFF(q_id) + \ + (pg_num) * HIFC_CSR_EQ_PAGE_OFF_STRIDE) + +#define HIFC_CEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \ + (HIFC_CSR_CEQ_MTT_OFF(q_id) + \ + (pg_num) * HIFC_CSR_EQ_PAGE_OFF_STRIDE + 4) + +#define HIFC_EQ_HI_PHYS_ADDR_REG(type, q_id, pg_num) \ + ((u32)((type == HIFC_AEQ) ? \ + HIFC_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) : \ + HIFC_CEQ_HI_PHYS_ADDR_REG(q_id, pg_num))) + +#define HIFC_EQ_LO_PHYS_ADDR_REG(type, q_id, pg_num) \ + ((u32)((type == HIFC_AEQ) ? 
\ + HIFC_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) : \ + HIFC_CEQ_LO_PHYS_ADDR_REG(q_id, pg_num))) + +#define HIFC_AEQ_CTRL_0_ADDR_BASE 0xE00 +#define HIFC_AEQ_CTRL_1_ADDR_BASE 0xE04 +#define HIFC_AEQ_CONS_IDX_0_ADDR_BASE 0xE08 +#define HIFC_AEQ_CONS_IDX_1_ADDR_BASE 0xE0C + +#define HIFC_EQ_OFF_STRIDE 0x80 + +#define HIFC_CSR_AEQ_CTRL_0_ADDR(idx) \ + (HIFC_AEQ_CTRL_0_ADDR_BASE + (idx) * HIFC_EQ_OFF_STRIDE) + +#define HIFC_CSR_AEQ_CTRL_1_ADDR(idx) \ + (HIFC_AEQ_CTRL_1_ADDR_BASE + (idx) * HIFC_EQ_OFF_STRIDE) + +#define HIFC_CSR_AEQ_CONS_IDX_ADDR(idx) \ + (HIFC_AEQ_CONS_IDX_0_ADDR_BASE + (idx) * HIFC_EQ_OFF_STRIDE) + +#define HIFC_CSR_AEQ_PROD_IDX_ADDR(idx) \ + (HIFC_AEQ_CONS_IDX_1_ADDR_BASE + (idx) * HIFC_EQ_OFF_STRIDE) + +#define HIFC_CEQ_CTRL_0_ADDR_BASE 0x1000 +#define HIFC_CEQ_CTRL_1_ADDR_BASE 0x1004 +#define HIFC_CEQ_CONS_IDX_0_ADDR_BASE 0x1008 +#define HIFC_CEQ_CONS_IDX_1_ADDR_BASE 0x100C + +#define HIFC_CSR_CEQ_CTRL_0_ADDR(idx) \ + (HIFC_CEQ_CTRL_0_ADDR_BASE + (idx) * HIFC_EQ_OFF_STRIDE) + +#define HIFC_CSR_CEQ_CTRL_1_ADDR(idx) \ + (HIFC_CEQ_CTRL_1_ADDR_BASE + (idx) * HIFC_EQ_OFF_STRIDE) + +#define HIFC_CSR_CEQ_CONS_IDX_ADDR(idx) \ + (HIFC_CEQ_CONS_IDX_0_ADDR_BASE + (idx) * HIFC_EQ_OFF_STRIDE) + +#define HIFC_CSR_CEQ_PROD_IDX_ADDR(idx) \ + (HIFC_CEQ_CONS_IDX_1_ADDR_BASE + (idx) * HIFC_EQ_OFF_STRIDE) + +enum hifc_eq_type { + HIFC_AEQ, + HIFC_CEQ +}; + +enum hifc_eq_intr_mode { + HIFC_INTR_MODE_ARMED, + HIFC_INTR_MODE_ALWAYS, +}; + +enum hifc_eq_ci_arm_state { + HIFC_EQ_NOT_ARMED, + HIFC_EQ_ARMED, +}; + +struct hifc_eq_work { + struct work_struct work; + void *data; +}; + +struct hifc_ceq_tasklet_data { + void *data; +}; + +struct hifc_eq { + struct hifc_hwdev *hwdev; + u16 q_id; + enum hifc_eq_type type; + u32 page_size; + u32 orig_page_size; + u32 eq_len; + + u32 cons_idx; + u16 wrapped; + + u16 elem_size; + u16 num_pages; + u32 num_elem_in_pg; + + struct irq_info eq_irq; + char irq_name[EQ_IRQ_NAME_LEN]; + + dma_addr_t *dma_addr; + u8 **virt_addr; + dma_addr_t *dma_addr_for_free; + u8 **virt_addr_for_free; + + struct hifc_eq_work aeq_work; + struct tasklet_struct ceq_tasklet; + struct hifc_ceq_tasklet_data ceq_tasklet_data; + + u64 hard_intr_jif; + u64 soft_intr_jif; +}; + +struct hifc_aeq_elem { + u8 aeqe_data[HIFC_AEQE_DATA_SIZE]; + u32 desc; +}; + +enum hifc_aeq_cb_state { + HIFC_AEQ_HW_CB_REG = 0, + HIFC_AEQ_HW_CB_RUNNING, + HIFC_AEQ_SW_CB_REG, + HIFC_AEQ_SW_CB_RUNNING, +}; + +struct hifc_aeqs { + struct hifc_hwdev *hwdev; + + hifc_aeq_hwe_cb aeq_hwe_cb[HIFC_MAX_AEQ_EVENTS]; + hifc_aeq_swe_cb aeq_swe_cb[HIFC_MAX_AEQ_SW_EVENTS]; + unsigned long aeq_hw_cb_state[HIFC_MAX_AEQ_EVENTS]; + unsigned long aeq_sw_cb_state[HIFC_MAX_AEQ_SW_EVENTS]; + + struct hifc_eq aeq[HIFC_MAX_AEQS]; + u16 num_aeqs; + + struct workqueue_struct *workq; +}; + +enum hifc_ceq_cb_state { + HIFC_CEQ_CB_REG = 0, + HIFC_CEQ_CB_RUNNING, +}; + +struct hifc_ceqs { + struct hifc_hwdev *hwdev; + + hifc_ceq_event_cb ceq_cb[HIFC_MAX_CEQ_EVENTS]; + void *ceq_data[HIFC_MAX_CEQ_EVENTS]; + unsigned long ceq_cb_state[HIFC_MAX_CEQ_EVENTS]; + + struct hifc_eq ceq[HIFC_MAX_CEQS]; + u16 num_ceqs; +}; + +int hifc_aeqs_init(struct hifc_hwdev *hwdev, u16 num_aeqs, + struct irq_info *msix_entries); + +void hifc_aeqs_free(struct hifc_hwdev *hwdev); + +int hifc_ceqs_init(struct hifc_hwdev *hwdev, u16 num_ceqs, + struct irq_info *msix_entries); + +void hifc_ceqs_free(struct hifc_hwdev *hwdev); + +void hifc_get_ceq_irqs(struct hifc_hwdev *hwdev, struct irq_info *irqs, + u16 *num_irqs); + +void hifc_get_aeq_irqs(struct hifc_hwdev *hwdev, struct 
irq_info *irqs, + u16 *num_irqs); + +void hifc_dump_aeq_info(struct hifc_hwdev *hwdev); + +#endif diff --git a/drivers/scsi/huawei/hifc/hifc_hw.h b/drivers/scsi/huawei/hifc/hifc_hw.h new file mode 100644 index 000000000000..49b2edd2bac6 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_hw.h @@ -0,0 +1,611 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef HIFC_HW_H_ +#define HIFC_HW_H_ + +#ifndef __BIG_ENDIAN__ +#define __BIG_ENDIAN__ 0x4321 +#endif + +#ifndef __LITTLE_ENDIAN__ +#define __LITTLE_ENDIAN__ 0x1234 +#endif + +enum hifc_mod_type { + HIFC_MOD_COMM = 0, /* HW communication module */ + HIFC_MOD_L2NIC = 1, /* L2NIC module*/ + HIFC_MOD_FCOE = 6, + HIFC_MOD_CFGM = 7, /* Configuration module */ + HIFC_MOD_FC = 10, + HIFC_MOD_HILINK = 14, + HIFC_MOD_HW_MAX = 16, /* hardware max module id */ + + /* Software module id, for PF/VF and multi-host */ + HIFC_MOD_MAX, +}; + +struct hifc_cmd_buf { + void *buf; + dma_addr_t dma_addr; + u16 size; +}; + +enum hifc_ack_type { + HIFC_ACK_TYPE_CMDQ, + HIFC_ACK_TYPE_SHARE_CQN, + HIFC_ACK_TYPE_APP_CQN, + HIFC_MOD_ACK_MAX = 15, +}; + +int hifc_msg_to_mgmt_sync(void *hwdev, enum hifc_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout); + +/* PF/VF send msg to uP by api cmd, and return immediately */ +int hifc_msg_to_mgmt_async(void *hwdev, enum hifc_mod_type mod, u8 cmd, + void *buf_in, u16 in_size); + +int hifc_api_cmd_write_nack(void *hwdev, u8 dest, + void *cmd, u16 size); + +int hifc_api_cmd_read_ack(void *hwdev, u8 dest, + void *cmd, u16 size, void *ack, u16 ack_size); +/* PF/VF send cmd to ucode by cmdq, and return if success. + * timeout=0, use default timeout. + */ +int hifc_cmdq_direct_resp(void *hwdev, enum hifc_ack_type ack_type, + enum hifc_mod_type mod, u8 cmd, + struct hifc_cmd_buf *buf_in, + u64 *out_param, u32 timeout); +/* 1. whether need the timeout parameter + * 2. out_param indicates the status of the microcode processing command + */ + +/* PF/VF send cmd to ucode by cmdq, and return detailed result. + * timeout=0, use default timeout. 
+ */ +int hifc_cmdq_detail_resp(void *hwdev, enum hifc_ack_type ack_type, + enum hifc_mod_type mod, u8 cmd, + struct hifc_cmd_buf *buf_in, + struct hifc_cmd_buf *buf_out, u32 timeout); + +/* PF/VF send cmd to ucode by cmdq, and return immediately + */ +int hifc_cmdq_async(void *hwdev, enum hifc_ack_type ack_type, + enum hifc_mod_type mod, u8 cmd, + struct hifc_cmd_buf *buf_in); + +int hifc_ppf_tmr_start(void *hwdev); +int hifc_ppf_tmr_stop(void *hwdev); + +enum hifc_ceq_event { + HIFC_CMDQ = 3, + HIFC_MAX_CEQ_EVENTS = 6, +}; + +typedef void (*hifc_ceq_event_cb)(void *handle, u32 ceqe_data); +int hifc_ceq_register_cb(void *hwdev, enum hifc_ceq_event event, + hifc_ceq_event_cb callback); +void hifc_ceq_unregister_cb(void *hwdev, enum hifc_ceq_event event); + +enum hifc_aeq_type { + HIFC_HW_INTER_INT = 0, + HIFC_MBX_FROM_FUNC = 1, + HIFC_MSG_FROM_MGMT_CPU = 2, + HIFC_API_RSP = 3, + HIFC_API_CHAIN_STS = 4, + HIFC_MBX_SEND_RSLT = 5, + HIFC_MAX_AEQ_EVENTS +}; + +enum hifc_aeq_sw_type { + HIFC_STATELESS_EVENT = 0, + HIFC_STATEFULL_EVENT = 1, + HIFC_MAX_AEQ_SW_EVENTS +}; + +typedef void (*hifc_aeq_hwe_cb)(void *handle, u8 *data, u8 size); +int hifc_aeq_register_hw_cb(void *hwdev, enum hifc_aeq_type event, + hifc_aeq_hwe_cb hwe_cb); +void hifc_aeq_unregister_hw_cb(void *hwdev, enum hifc_aeq_type event); + +typedef u8 (*hifc_aeq_swe_cb)(void *handle, u8 event, u64 data); +int hifc_aeq_register_swe_cb(void *hwdev, enum hifc_aeq_sw_type event, + hifc_aeq_swe_cb aeq_swe_cb); +void hifc_aeq_unregister_swe_cb(void *hwdev, enum hifc_aeq_sw_type event); + +typedef void (*hifc_mgmt_msg_cb)(void *hwdev, void *pri_handle, + u8 cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size); + +int hifc_register_mgmt_msg_cb(void *hwdev, + enum hifc_mod_type mod, void *pri_handle, + hifc_mgmt_msg_cb callback); +void hifc_unregister_mgmt_msg_cb(void *hwdev, enum hifc_mod_type mod); + +struct hifc_cmd_buf *hifc_alloc_cmd_buf(void *hwdev); +void hifc_free_cmd_buf(void *hwdev, struct hifc_cmd_buf *buf); + +int hifc_alloc_db_addr(void *hwdev, void __iomem **db_base, + void __iomem **dwqe_base); +void hifc_free_db_addr(void *hwdev, void __iomem *db_base, + void __iomem *dwqe_base); + +struct nic_interrupt_info { + u32 lli_set; + u32 interrupt_coalesc_set; + u16 msix_index; + u8 lli_credit_limit; + u8 lli_timer_cfg; + u8 pending_limt; + u8 coalesc_timer_cfg; + u8 resend_timer_cfg; +}; + +int hifc_get_interrupt_cfg(void *hwdev, + struct nic_interrupt_info *interrupt_info); + +int hifc_set_interrupt_cfg(void *hwdev, + struct nic_interrupt_info interrupt_info); + +/* The driver code implementation interface*/ +void hifc_misx_intr_clear_resend_bit(void *hwdev, + u16 msix_idx, u8 clear_resend_en); + +struct hifc_sq_attr { + u8 dma_attr_off; + u8 pending_limit; + u8 coalescing_time; + u8 intr_en; + u16 intr_idx; + u32 l2nic_sqn; + u64 ci_dma_base; +}; + +int hifc_set_ci_table(void *hwdev, u16 q_id, struct hifc_sq_attr *attr); + +int hifc_set_root_ctxt(void *hwdev, u16 rq_depth, u16 sq_depth, int rx_buf_sz); +int hifc_clean_root_ctxt(void *hwdev); +void hifc_record_pcie_error(void *hwdev); + +int hifc_func_rx_tx_flush(void *hwdev); + +int hifc_func_tmr_bitmap_set(void *hwdev, bool enable); + +struct hifc_init_para { + /* Record hifc_pcidev or NDIS_Adapter pointer address*/ + void *adapter_hdl; + /* Record pcidev or Handler pointer address + * for example: ioremap interface input parameter + */ + void *pcidev_hdl; + /* Record pcidev->dev or Handler pointer address which used to + * dma address application or dev_err print the 
parameter + */ + void *dev_hdl; + + void *cfg_reg_base; /* Configure virtual address, bar0/1*/ + /* interrupt configuration register address, bar2/3 */ + void *intr_reg_base; + u64 db_base_phy; + void *db_base; /* the doorbell address, bar4/5 higher 4M space*/ + void *dwqe_mapping;/* direct wqe 4M, follow the doorbell address space*/ + void **hwdev; + void *chip_node; + /* In bmgw x86 host, driver can't send message to mgmt cpu directly, + * need to trasmit message ppf mbox to bmgw arm host. + */ + void *ppf_hwdev; +}; + +#ifndef IFNAMSIZ +#define IFNAMSIZ 16 +#endif +#define MAX_FUNCTION_NUM 512 +#define HIFC_MAX_PF_NUM 16 +#define HIFC_MAX_COS 8 +#define INIT_FAILED 0 +#define INIT_SUCCESS 1 +#define MAX_DRV_BUF_SIZE 4096 + +struct hifc_cmd_get_light_module_abs { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 port_id; + u8 abs_status; /* 0:present, 1:absent */ + u8 rsv[2]; +}; + +#define SFP_INFO_MAX_SIZE 512 +struct hifc_cmd_get_sfp_qsfp_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 port_id; + u8 wire_type; + u16 out_len; + u8 sfp_qsfp_info[SFP_INFO_MAX_SIZE]; +}; + +#define HIFC_MAX_PORT_ID 4 + +struct hifc_port_routine_cmd { + bool up_send_sfp_info; + bool up_send_sfp_abs; + + struct hifc_cmd_get_sfp_qsfp_info sfp_info; + struct hifc_cmd_get_light_module_abs abs; +}; + +struct card_node { + struct list_head node; + struct list_head func_list; + char chip_name[IFNAMSIZ]; + void *log_info; + void *dbgtool_info; + void *func_handle_array[MAX_FUNCTION_NUM]; + unsigned char dp_bus_num; + u8 func_num; + struct attribute dbgtool_attr_file; + + bool cos_up_setted; + u8 cos_up[HIFC_MAX_COS]; + bool ppf_state; + u8 pf_bus_num[HIFC_MAX_PF_NUM]; + + struct hifc_port_routine_cmd rt_cmd[HIFC_MAX_PORT_ID]; + + /* mutex used for copy sfp info */ + struct mutex sfp_mutex; +}; + +enum hifc_hwdev_init_state { + HIFC_HWDEV_NONE_INITED = 0, + HIFC_HWDEV_CLP_INITED, + HIFC_HWDEV_AEQ_INITED, + HIFC_HWDEV_MGMT_INITED, + HIFC_HWDEV_MBOX_INITED, + HIFC_HWDEV_CMDQ_INITED, + HIFC_HWDEV_COMM_CH_INITED, + HIFC_HWDEV_ALL_INITED, + HIFC_HWDEV_MAX_INVAL_INITED +}; + +enum hifc_func_cap { + /* send message to mgmt cpu directly */ + HIFC_FUNC_MGMT = 1 << 0, + /* setting port attribute, pause/speed etc. 
*/ + HIFC_FUNC_PORT = 1 << 1, + /* Enable SR-IOV in default */ + HIFC_FUNC_SRIOV_EN_DFLT = 1 << 2, + /* Can't change VF num */ + HIFC_FUNC_SRIOV_NUM_FIX = 1 << 3, + /* Fcorce pf/vf link up */ + HIFC_FUNC_FORCE_LINK_UP = 1 << 4, + /* Support rate limit */ + HIFC_FUNC_SUPP_RATE_LIMIT = 1 << 5, + HIFC_FUNC_SUPP_DFX_REG = 1 << 6, + /* Support promisc/multicast/all-multi */ + HIFC_FUNC_SUPP_RX_MODE = 1 << 7, + /* Set vf mac and vlan by ip link */ + HIFC_FUNC_SUPP_SET_VF_MAC_VLAN = 1 << 8, + /* Support set mac by ifconfig */ + HIFC_FUNC_SUPP_CHANGE_MAC = 1 << 9, + /* OVS don't support SCTP_CRC/HW_VLAN/LRO */ + HIFC_FUNC_OFFLOAD_OVS_UNSUPP = 1 << 10, +}; + +#define FUNC_SUPPORT_MGMT(hwdev) \ + (!!(hifc_get_func_feature_cap(hwdev) & HIFC_FUNC_MGMT)) +#define FUNC_SUPPORT_PORT_SETTING(hwdev) \ + (!!(hifc_get_func_feature_cap(hwdev) & HIFC_FUNC_PORT)) +#define FUNC_SUPPORT_DCB(hwdev) \ + (FUNC_SUPPORT_PORT_SETTING(hwdev)) +#define FUNC_ENABLE_SRIOV_IN_DEFAULT(hwdev) \ + (!!(hifc_get_func_feature_cap(hwdev) & \ + HIFC_FUNC_SRIOV_EN_DFLT)) +#define FUNC_SRIOV_FIX_NUM_VF(hwdev) \ + (!!(hifc_get_func_feature_cap(hwdev) & \ + HIFC_FUNC_SRIOV_NUM_FIX)) +#define FUNC_SUPPORT_RX_MODE(hwdev) \ + (!!(hifc_get_func_feature_cap(hwdev) & \ + HIFC_FUNC_SUPP_RX_MODE)) +#define FUNC_SUPPORT_RATE_LIMIT(hwdev) \ + (!!(hifc_get_func_feature_cap(hwdev) & \ + HIFC_FUNC_SUPP_RATE_LIMIT)) +#define FUNC_SUPPORT_SET_VF_MAC_VLAN(hwdev) \ + (!!(hifc_get_func_feature_cap(hwdev) & \ + HIFC_FUNC_SUPP_SET_VF_MAC_VLAN)) +#define FUNC_SUPPORT_CHANGE_MAC(hwdev) \ + (!!(hifc_get_func_feature_cap(hwdev) & \ + HIFC_FUNC_SUPP_CHANGE_MAC)) +#define FUNC_FORCE_LINK_UP(hwdev) \ + (!!(hifc_get_func_feature_cap(hwdev) & \ + HIFC_FUNC_FORCE_LINK_UP)) +#define FUNC_SUPPORT_SCTP_CRC(hwdev) \ + (!(hifc_get_func_feature_cap(hwdev) & \ + HIFC_FUNC_OFFLOAD_OVS_UNSUPP)) +#define FUNC_SUPPORT_HW_VLAN(hwdev) \ + (!(hifc_get_func_feature_cap(hwdev) & \ + HIFC_FUNC_OFFLOAD_OVS_UNSUPP)) +#define FUNC_SUPPORT_LRO(hwdev) \ + (!(hifc_get_func_feature_cap(hwdev) & \ + HIFC_FUNC_OFFLOAD_OVS_UNSUPP)) + +int hifc_init_hwdev(struct hifc_init_para *para); +void hifc_free_hwdev(void *hwdev); +int hifc_stateful_init(void *hwdev); +void hifc_stateful_deinit(void *hwdev); +bool hifc_is_hwdev_mod_inited(void *hwdev, enum hifc_hwdev_init_state state); +u64 hifc_get_func_feature_cap(void *hwdev); +int hifc_slq_init(void *dev, int num_wqs); +void hifc_slq_uninit(void *dev); +int hifc_slq_alloc(void *dev, u16 wqebb_size, u16 q_depth, + u16 page_size, u64 *cla_addr, void **handle); +void hifc_slq_free(void *dev, void *handle); +u64 hifc_slq_get_addr(void *handle, u16 index); +u64 hifc_slq_get_first_pageaddr(void *handle); + +typedef void (*comm_up_self_msg_proc)(void *handle, void *buf_in, + u16 in_size, void *buf_out, + u16 *out_size); +void hifc_comm_recv_mgmt_self_cmd_reg(void *hwdev, u8 cmd, + comm_up_self_msg_proc proc); +void hifc_comm_recv_up_self_cmd_unreg(void *hwdev, u8 cmd); + +/* defined by chip */ +enum hifc_fault_type { + FAULT_TYPE_CHIP, + FAULT_TYPE_UCODE, + FAULT_TYPE_MEM_RD_TIMEOUT, + FAULT_TYPE_MEM_WR_TIMEOUT, + FAULT_TYPE_REG_RD_TIMEOUT, + FAULT_TYPE_REG_WR_TIMEOUT, + FAULT_TYPE_PHY_FAULT, + FAULT_TYPE_MAX, +}; + +/* defined by chip */ +enum hifc_fault_err_level { + /* default err_level=FAULT_LEVEL_FATAL if + * type==FAULT_TYPE_MEM_RD_TIMEOUT || FAULT_TYPE_MEM_WR_TIMEOUT || + * FAULT_TYPE_REG_RD_TIMEOUT || FAULT_TYPE_REG_WR_TIMEOUT || + * FAULT_TYPE_UCODE + * other: err_level in event.chip.err_level if type==FAULT_TYPE_CHIP + */ + FAULT_LEVEL_FATAL, + 
FAULT_LEVEL_SERIOUS_RESET, + FAULT_LEVEL_SERIOUS_FLR, + FAULT_LEVEL_GENERAL, + FAULT_LEVEL_SUGGESTION, + FAULT_LEVEL_MAX +}; + +enum hifc_fault_source_type { + /* same as FAULT_TYPE_CHIP */ + HIFC_FAULT_SRC_HW_MGMT_CHIP = 0, + /* same as FAULT_TYPE_UCODE */ + HIFC_FAULT_SRC_HW_MGMT_UCODE, + /* same as FAULT_TYPE_MEM_RD_TIMEOUT */ + HIFC_FAULT_SRC_HW_MGMT_MEM_RD_TIMEOUT, + /* same as FAULT_TYPE_MEM_WR_TIMEOUT */ + HIFC_FAULT_SRC_HW_MGMT_MEM_WR_TIMEOUT, + /* same as FAULT_TYPE_REG_RD_TIMEOUT */ + HIFC_FAULT_SRC_HW_MGMT_REG_RD_TIMEOUT, + /* same as FAULT_TYPE_REG_WR_TIMEOUT */ + HIFC_FAULT_SRC_HW_MGMT_REG_WR_TIMEOUT, + HIFC_FAULT_SRC_SW_MGMT_UCODE, + HIFC_FAULT_SRC_MGMT_WATCHDOG, + HIFC_FAULT_SRC_MGMT_RESET = 8, + HIFC_FAULT_SRC_HW_PHY_FAULT, + HIFC_FAULT_SRC_HOST_HEARTBEAT_LOST = 20, + HIFC_FAULT_SRC_TYPE_MAX, +}; + +struct hifc_fault_sw_mgmt { + u8 event_id; + u64 event_data; +}; + +union hifc_fault_hw_mgmt { + u32 val[4]; + /* valid only type==FAULT_TYPE_CHIP */ + struct { + u8 node_id; + /* enum hifc_fault_err_level */ + u8 err_level; + u16 err_type; + u32 err_csr_addr; + u32 err_csr_value; + /* func_id valid only err_level==FAULT_LEVEL_SERIOUS_FLR + */ + u16 func_id; + u16 rsvd2; + } chip; + + /* valid only type==FAULT_TYPE_UCODE */ + struct { + u8 cause_id; + u8 core_id; + u8 c_id; + u8 rsvd3; + u32 epc; + u32 rsvd4; + u32 rsvd5; + } ucode; + + /* valid only type==FAULT_TYPE_MEM_RD_TIMEOUT || + * FAULT_TYPE_MEM_WR_TIMEOUT + */ + struct { + u32 err_csr_ctrl; + u32 err_csr_data; + u32 ctrl_tab; + u32 mem_index; + } mem_timeout; + + /* valid only type==FAULT_TYPE_REG_RD_TIMEOUT || + * FAULT_TYPE_REG_WR_TIMEOUT + */ + struct { + u32 err_csr; + u32 rsvd6; + u32 rsvd7; + u32 rsvd8; + } reg_timeout; + + struct { + /* 0: read; 1: write */ + u8 op_type; + u8 port_id; + u8 dev_ad; + u8 rsvd9; + u32 csr_addr; + u32 op_data; + u32 rsvd10; + } phy_fault; +}; + +/* defined by chip */ +struct hifc_fault_event { + /* enum hifc_fault_type */ + u8 type; + u8 rsvd0[3]; + union hifc_fault_hw_mgmt event; +}; + +struct hifc_fault_recover_info { + u8 fault_src; /* enum hifc_fault_source_type */ + u8 fault_lev; /* enum hifc_fault_err_level */ + u8 rsvd0[2]; + union { + union hifc_fault_hw_mgmt hw_mgmt; + struct hifc_fault_sw_mgmt sw_mgmt; + u32 mgmt_rsvd[4]; + u32 host_rsvd[4]; + } fault_data; +}; + +struct hifc_dcb_state { + u8 dcb_on; + u8 default_cos; + u8 up_cos[8]; +}; + +enum link_err_type { + LINK_ERR_MODULE_UNRECOGENIZED, + LINK_ERR_NUM, +}; + +enum port_module_event_type { + HIFC_PORT_MODULE_CABLE_PLUGGED, + HIFC_PORT_MODULE_CABLE_UNPLUGGED, + HIFC_PORT_MODULE_LINK_ERR, + HIFC_PORT_MODULE_MAX_EVENT, +}; + +struct hifc_port_module_event { + enum port_module_event_type type; + enum link_err_type err_type; +}; + +struct hifc_event_link_info { + u8 valid; + u8 port_type; + u8 autoneg_cap; + u8 autoneg_state; + u8 duplex; + u8 speed; +}; + +struct hifc_mctp_host_info { + u8 major_cmd; + u8 sub_cmd; + u8 rsvd[2]; + + u32 data_len; + void *data; +}; + +enum hifc_event_type { + HIFC_EVENT_LINK_DOWN = 0, + HIFC_EVENT_LINK_UP = 1, + HIFC_EVENT_HEART_LOST = 2, + HIFC_EVENT_FAULT = 3, + HIFC_EVENT_NOTIFY_VF_DCB_STATE = 4, + HIFC_EVENT_DCB_STATE_CHANGE = 5, + HIFC_EVENT_FMW_ACT_NTC = 6, + HIFC_EVENT_PORT_MODULE_EVENT = 7, + HIFC_EVENT_MCTP_GET_HOST_INFO, + HIFC_EVENT_MULTI_HOST_MGMT, + HIFC_EVENT_INIT_MIGRATE_PF, +}; + +struct hifc_event_info { + enum hifc_event_type type; + union { + struct hifc_event_link_info link_info; + struct hifc_fault_event info; + struct hifc_dcb_state dcb_state; + struct 
hifc_port_module_event module_event; + u8 vf_default_cos; + struct hifc_mctp_host_info mctp_info; + }; +}; + +enum hifc_ucode_event_type { + HIFC_INTERNAL_TSO_FATAL_ERROR = 0x0, + HIFC_INTERNAL_LRO_FATAL_ERROR = 0x1, + HIFC_INTERNAL_TX_FATAL_ERROR = 0x2, + HIFC_INTERNAL_RX_FATAL_ERROR = 0x3, + HIFC_INTERNAL_OTHER_FATAL_ERROR = 0x4, + HIFC_NIC_FATAL_ERROR_MAX = 0x8, +}; + +typedef void (*hifc_event_handler)(void *handle, + struct hifc_event_info *event); +/* only register once */ +void hifc_event_register(void *dev, void *pri_handle, + hifc_event_handler callback); +void hifc_event_unregister(void *dev); + +void hifc_detect_hw_present(void *hwdev); + +void hifc_set_chip_absent(void *hwdev); + +int hifc_get_chip_present_flag(void *hwdev); + +void hifc_set_pcie_order_cfg(void *handle); + +int hifc_get_mgmt_channel_status(void *handle); + +struct hifc_board_info { + u32 board_type; + u32 port_num; + u32 port_speed; + u32 pcie_width; + u32 host_num; + u32 pf_num; + u32 vf_total_num; + u32 tile_num; + u32 qcm_num; + u32 core_num; + u32 work_mode; + u32 service_mode; + u32 pcie_mode; + u32 cfg_addr; + u32 boot_sel; + u32 board_id; +}; + +int hifc_get_board_info(void *hwdev, struct hifc_board_info *info); + +int hifc_get_card_present_state(void *hwdev, bool *card_present_state); + +#endif diff --git a/drivers/scsi/huawei/hifc/hifc_hwdev.c b/drivers/scsi/huawei/hifc/hifc_hwdev.c new file mode 100644 index 000000000000..760e02394b05 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_hwdev.c @@ -0,0 +1,3675 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/pci_regs.h> +#include <linux/types.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/completion.h> +#include <linux/semaphore.h> +#include <linux/interrupt.h> + +#include "hifc_knl_adp.h" +#include "hifc_hw.h" +#include "hifc_hwdev.h" +#include "hifc_hwif.h" +#include "hifc_api_cmd.h" +#include "hifc_mgmt.h" +#include "hifc_eqs.h" +#include "hifc_wq.h" +#include "hifc_cmdq.h" +#include "hifc_hwif.h" + +#define HIFC_DEAULT_EQ_MSIX_PENDING_LIMIT 0 +#define HIFC_DEAULT_EQ_MSIX_COALESC_TIMER_CFG 0xFF +#define HIFC_DEAULT_EQ_MSIX_RESEND_TIMER_CFG 7 +#define HIFC_FLR_TIMEOUT 1000 +#define HIFC_HT_GPA_PAGE_SIZE 4096UL +#define HIFC_PPF_HT_GPA_SET_RETRY_TIMES 10 +#define HIFC_GET_SFP_INFO_REAL_TIME 0x1 +#define HIFC_GLB_SO_RO_CFG_SHIFT 0x0 +#define HIFC_GLB_SO_RO_CFG_MASK 0x1 +#define HIFC_DISABLE_ORDER 0 +#define HIFC_GLB_DMA_SO_RO_GET(val, member) \ + (((val) >> HIFC_GLB_##member##_SHIFT) & HIFC_GLB_##member##_MASK) + +#define HIFC_GLB_DMA_SO_R0_CLEAR(val, member) \ + ((val) & (~(HIFC_GLB_##member##_MASK << HIFC_GLB_##member##_SHIFT))) + +#define HIFC_GLB_DMA_SO_R0_SET(val, member) \ + (((val) & HIFC_GLB_##member##_MASK) << HIFC_GLB_##member##_SHIFT) + +#define HIFC_MGMT_CHANNEL_STATUS_SHIFT 0x0 +#define HIFC_MGMT_CHANNEL_STATUS_MASK 0x1 +#define HIFC_ACTIVE_STATUS_MASK 0x80000000 +#define HIFC_ACTIVE_STATUS_CLEAR 0x7FFFFFFF + +#define HIFC_GET_MGMT_CHANNEL_STATUS(val, member) \ + (((val) >> HIFC_##member##_SHIFT) & HIFC_##member##_MASK) + +#define HIFC_CLEAR_MGMT_CHANNEL_STATUS(val, member) \ + ((val) & (~(HIFC_##member##_MASK << HIFC_##member##_SHIFT))) + +#define HIFC_SET_MGMT_CHANNEL_STATUS(val, member) \ + (((val) & HIFC_##member##_MASK) << HIFC_##member##_SHIFT) + +#define HIFC_BOARD_IS_PHY(hwdev) \ + 
((hwdev)->board_info.board_type == 4 && \ + (hwdev)->board_info.board_id == 24) + +struct comm_info_ht_gpa_set { + u8 status; + u8 version; + u8 rsvd0[6]; + u32 rsvd1; + u32 rsvd2; + u64 page_pa0; + u64 page_pa1; +}; + +struct hifc_cons_idx_attr { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 dma_attr_off; + u8 pending_limit; + u8 coalescing_time; + u8 intr_en; + u16 intr_idx; + u32 l2nic_sqn; + u32 sq_id; + u64 ci_addr; +}; + +struct hifc_clear_doorbell { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 ppf_idx; + u8 rsvd1; +}; + +struct hifc_clear_resource { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 ppf_idx; + u8 rsvd1; +}; + +struct hifc_msix_config { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 msix_index; + u8 pending_cnt; + u8 coalesct_timer_cnt; + u8 lli_tmier_cnt; + u8 lli_credit_cnt; + u8 resend_timer_cnt; + u8 rsvd1[3]; +}; + +enum func_tmr_bitmap_status { + FUNC_TMR_BITMAP_DISABLE, + FUNC_TMR_BITMAP_ENABLE, +}; + +struct hifc_func_tmr_bitmap_op { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 op_id; /* 0:start; 1:stop */ + u8 ppf_idx; + u32 rsvd1; +}; + +struct hifc_ppf_tmr_op { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 ppf_idx; + u8 op_id; /* 0: stop timer; 1:start timer */ + u8 rsvd1[2]; + u32 rsvd2; +}; + +struct hifc_cmd_set_res_state { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 state; + u8 rsvd1; + u32 rsvd2; +}; + +int hifc_hw_rx_buf_size[] = { + HIFC_RX_BUF_SIZE_32B, + HIFC_RX_BUF_SIZE_64B, + HIFC_RX_BUF_SIZE_96B, + HIFC_RX_BUF_SIZE_128B, + HIFC_RX_BUF_SIZE_192B, + HIFC_RX_BUF_SIZE_256B, + HIFC_RX_BUF_SIZE_384B, + HIFC_RX_BUF_SIZE_512B, + HIFC_RX_BUF_SIZE_768B, + HIFC_RX_BUF_SIZE_1K, + HIFC_RX_BUF_SIZE_1_5K, + HIFC_RX_BUF_SIZE_2K, + HIFC_RX_BUF_SIZE_3K, + HIFC_RX_BUF_SIZE_4K, + HIFC_RX_BUF_SIZE_8K, + HIFC_RX_BUF_SIZE_16K, +}; + +struct hifc_comm_board_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + struct hifc_board_info info; + + u32 rsvd1[4]; +}; + +#define PHY_DOING_INIT_TIMEOUT (15 * 1000) + +struct hifc_phy_init_status { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 init_status; + u8 rsvd1[3]; +}; + +enum phy_init_status_type { + PHY_INIT_DOING = 0, + PHY_INIT_SUCCESS = 1, + PHY_INIT_FAIL = 2, + PHY_NONSUPPORT = 3, +}; + +struct hifc_update_active { + u8 status; + u8 version; + u8 rsvd0[6]; + + u32 update_flag; + u32 update_status; +}; + +struct hifc_mgmt_watchdog_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u32 curr_time_h; + u32 curr_time_l; + u32 task_id; + u32 rsv; + + u32 reg[13]; + u32 pc; + u32 lr; + u32 cpsr; + + u32 stack_top; + u32 stack_bottom; + u32 sp; + u32 curr_used; + u32 peak_used; + u32 is_overflow; + + u32 stack_actlen; + u8 data[1024]; +}; + +struct hifc_fmw_act_ntc { + u8 status; + u8 version; + u8 rsvd0[6]; + + u32 rsvd1[5]; +}; + +#define HIFC_PAGE_SIZE_HW(pg_size) ((u8)ilog2((u32)((pg_size) >> 12))) + +struct hifc_wq_page_size { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 ppf_idx; + /* real_size=4KB*2^page_size, range(0~20) must be checked by driver */ + u8 page_size; + + u32 rsvd1; +}; + +#define MAX_PCIE_DFX_BUF_SIZE (1024) + +struct hifc_pcie_dfx_ntc { + u8 status; + u8 version; + u8 rsvd0[6]; + + int len; + u32 rsvd; +}; + +struct hifc_pcie_dfx_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 host_id; + u8 last; + u8 rsvd[2]; + u32 offset; + + u8 data[MAX_PCIE_DFX_BUF_SIZE]; +}; + +struct hifc_reg_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u32 reg_addr; + u32 
val_length; + + u32 data[2]; +}; + +#define HIFC_DMA_ATTR_ENTRY_ST_SHIFT 0 +#define HIFC_DMA_ATTR_ENTRY_AT_SHIFT 8 +#define HIFC_DMA_ATTR_ENTRY_PH_SHIFT 10 +#define HIFC_DMA_ATTR_ENTRY_NO_SNOOPING_SHIFT 12 +#define HIFC_DMA_ATTR_ENTRY_TPH_EN_SHIFT 13 + +#define HIFC_DMA_ATTR_ENTRY_ST_MASK 0xFF +#define HIFC_DMA_ATTR_ENTRY_AT_MASK 0x3 +#define HIFC_DMA_ATTR_ENTRY_PH_MASK 0x3 +#define HIFC_DMA_ATTR_ENTRY_NO_SNOOPING_MASK 0x1 +#define HIFC_DMA_ATTR_ENTRY_TPH_EN_MASK 0x1 + +#define HIFC_DMA_ATTR_ENTRY_SET(val, member) \ + (((u32)(val) & HIFC_DMA_ATTR_ENTRY_##member##_MASK) << \ + HIFC_DMA_ATTR_ENTRY_##member##_SHIFT) + +#define HIFC_DMA_ATTR_ENTRY_CLEAR(val, member) \ + ((val) & (~(HIFC_DMA_ATTR_ENTRY_##member##_MASK \ + << HIFC_DMA_ATTR_ENTRY_##member##_SHIFT))) + +#define HIFC_PCIE_ST_DISABLE 0 +#define HIFC_PCIE_AT_DISABLE 0 +#define HIFC_PCIE_PH_DISABLE 0 + +#define PCIE_MSIX_ATTR_ENTRY 0 + +#define HIFC_CHIP_PRESENT 1 +#define HIFC_CHIP_ABSENT 0 + +struct hifc_cmd_fault_event { + u8 status; + u8 version; + u8 rsvd0[6]; + + struct hifc_fault_event event; +}; + +#define HEARTBEAT_DRV_MAGIC_ACK 0x5A5A5A5A + +struct hifc_heartbeat_event { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 mgmt_init_state; + u8 rsvd1[3]; + u32 heart; /* increased every event */ + u32 drv_heart; +}; + +static void hifc_set_mgmt_channel_status(void *handle, bool state) +{ + struct hifc_hwdev *hwdev = handle; + u32 val; + + if (!hwdev || hifc_func_type(hwdev) == TYPE_VF || + !(hwdev->feature_cap & HIFC_FUNC_SUPP_DFX_REG)) + return; + + val = hifc_hwif_read_reg(hwdev->hwif, HIFC_ICPL_RESERVD_ADDR); + val = HIFC_CLEAR_MGMT_CHANNEL_STATUS(val, MGMT_CHANNEL_STATUS); + val |= HIFC_SET_MGMT_CHANNEL_STATUS((u32)state, MGMT_CHANNEL_STATUS); + + hifc_hwif_write_reg(hwdev->hwif, HIFC_ICPL_RESERVD_ADDR, val); +} + +static void hifc_enable_mgmt_channel(void *hwdev, void *buf_out) +{ + struct hifc_hwdev *dev = hwdev; + struct hifc_update_active *active_info = buf_out; + + if (!active_info || hifc_func_type(hwdev) == TYPE_VF || + !(dev->feature_cap & HIFC_FUNC_SUPP_DFX_REG)) + return; + + if ((!active_info->status) && + (active_info->update_status & HIFC_ACTIVE_STATUS_MASK)) { + active_info->update_status &= HIFC_ACTIVE_STATUS_CLEAR; + return; + } + + hifc_set_mgmt_channel_status(hwdev, false); +} + +int hifc_set_wq_page_size(struct hifc_hwdev *hwdev, u16 func_idx, + u32 page_size); + +#define HIFC_QUEUE_MIN_DEPTH 6 +#define HIFC_QUEUE_MAX_DEPTH 12 +#define HIFC_MAX_RX_BUFFER_SIZE 15 + +#define ROOT_CTX_QPS_VALID(root_ctxt) \ + ((root_ctxt)->rq_depth >= HIFC_QUEUE_MIN_DEPTH && \ + (root_ctxt)->rq_depth <= HIFC_QUEUE_MAX_DEPTH && \ + (root_ctxt)->sq_depth >= HIFC_QUEUE_MIN_DEPTH && \ + (root_ctxt)->sq_depth <= HIFC_QUEUE_MAX_DEPTH && \ + (root_ctxt)->rx_buf_sz <= HIFC_MAX_RX_BUFFER_SIZE) + +struct hifc_mgmt_status_log { + u8 status; + const char *log; +}; + +struct hifc_mgmt_status_log mgmt_status_log[] = { + {HIFC_MGMT_STATUS_ERR_PARAM, "Invalid parameter"}, + {HIFC_MGMT_STATUS_ERR_FAILED, "Operation failed"}, + {HIFC_MGMT_STATUS_ERR_PORT, "Invalid port"}, + {HIFC_MGMT_STATUS_ERR_TIMEOUT, "Operation time out"}, + {HIFC_MGMT_STATUS_ERR_NOMATCH, "Version not match"}, + {HIFC_MGMT_STATUS_ERR_EXIST, "Entry exists"}, + {HIFC_MGMT_STATUS_ERR_NOMEM, "Out of memory"}, + {HIFC_MGMT_STATUS_ERR_INIT, "Feature not initialized"}, + {HIFC_MGMT_STATUS_ERR_FAULT, "Invalid address"}, + {HIFC_MGMT_STATUS_ERR_PERM, "Operation not permitted"}, + {HIFC_MGMT_STATUS_ERR_EMPTY, "Table empty"}, + {HIFC_MGMT_STATUS_ERR_FULL, "Table full"}, + 
{HIFC_MGMT_STATUS_ERR_NOT_FOUND, "Not found"}, + {HIFC_MGMT_STATUS_ERR_BUSY, "Device or resource busy "}, + {HIFC_MGMT_STATUS_ERR_RESOURCE, "No resources for operation "}, + {HIFC_MGMT_STATUS_ERR_CONFIG, "Invalid configuration"}, + {HIFC_MGMT_STATUS_ERR_UNAVAIL, "Feature unavailable"}, + {HIFC_MGMT_STATUS_ERR_CRC, "CRC check failed"}, + {HIFC_MGMT_STATUS_ERR_NXIO, "No such device or address"}, + {HIFC_MGMT_STATUS_ERR_ROLLBACK, "Chip rollback fail"}, + {HIFC_MGMT_STATUS_ERR_LEN, "Length too short or too long"}, + {HIFC_MGMT_STATUS_ERR_UNSUPPORT, "Feature not supported"}, +}; + +static void __print_status_info(struct hifc_hwdev *dev, + enum hifc_mod_type mod, u8 cmd, int index) +{ + if (mod == HIFC_MOD_COMM) { + sdk_err(dev->dev_hdl, "Mgmt process mod(0x%x) cmd(0x%x) fail: %s", + mod, cmd, mgmt_status_log[index].log); + } else if (mod == HIFC_MOD_L2NIC || + mod == HIFC_MOD_HILINK) { + sdk_err(dev->dev_hdl, "Mgmt process mod(0x%x) cmd(0x%x) fail: %s", + mod, cmd, mgmt_status_log[index].log); + } +} + +static bool hifc_status_need_special_handle(enum hifc_mod_type mod, + u8 cmd, u8 status) +{ + if (mod == HIFC_MOD_L2NIC) { + /* optical module isn't plugged in */ + if (((cmd == HIFC_PORT_CMD_GET_STD_SFP_INFO) || + (cmd == HIFC_PORT_CMD_GET_SFP_INFO)) && + (status == HIFC_MGMT_STATUS_ERR_NXIO)) + return true; + + if ((cmd == HIFC_PORT_CMD_SET_MAC || + cmd == HIFC_PORT_CMD_UPDATE_MAC) && + status == HIFC_MGMT_STATUS_ERR_EXIST) + return true; + } + + return false; +} + +static bool print_status_info_valid(enum hifc_mod_type mod, + const void *buf_out) +{ + if (!buf_out) + return false; + + if (mod != HIFC_MOD_COMM && mod != HIFC_MOD_L2NIC && + mod != HIFC_MOD_HILINK) + return false; + + return true; +} + +static void hifc_print_status_info(void *hwdev, enum hifc_mod_type mod, + u8 cmd, const void *buf_out) +{ + struct hifc_hwdev *dev = hwdev; + int i, size; + u8 status; + + if (!print_status_info_valid(mod, buf_out)) + return; + + status = *(u8 *)buf_out; + + if (!status) + return; + + if (hifc_status_need_special_handle(mod, cmd, status)) + return; + + size = sizeof(mgmt_status_log) / sizeof(mgmt_status_log[0]); + for (i = 0; i < size; i++) { + if (status == mgmt_status_log[i].status) { + __print_status_info(dev, mod, cmd, i); + return; + } + } + + if (mod == HIFC_MOD_COMM) { + sdk_err(dev->dev_hdl, "Mgmt process mod(0x%x) cmd(0x%x) return driver unknown status(0x%x)\n", + mod, cmd, status); + } else if (mod == HIFC_MOD_L2NIC || mod == HIFC_MOD_HILINK) { + sdk_err(dev->dev_hdl, "Mgmt process mod(0x%x) cmd(0x%x) return driver unknown status(0x%x)\n", + mod, cmd, status); + } +} + +void hifc_set_chip_present(void *hwdev) +{ + ((struct hifc_hwdev *)hwdev)->chip_present_flag = HIFC_CHIP_PRESENT; +} + +void hifc_set_chip_absent(void *hwdev) +{ + struct hifc_hwdev *dev = hwdev; + + sdk_err(dev->dev_hdl, "Card not present\n"); + dev->chip_present_flag = HIFC_CHIP_ABSENT; +} + +int hifc_get_chip_present_flag(void *hwdev) +{ + int flag; + + if (!hwdev) + return -EINVAL; + flag = ((struct hifc_hwdev *)hwdev)->chip_present_flag; + return flag; +} + +void hifc_force_complete_all(void *hwdev) +{ + struct hifc_hwdev *dev = (struct hifc_hwdev *)hwdev; + struct hifc_recv_msg *recv_resp_msg; + + set_bit(HIFC_HWDEV_STATE_BUSY, &dev->func_state); + + if (hifc_func_type(dev) != TYPE_VF && + hifc_is_hwdev_mod_inited(dev, HIFC_HWDEV_MGMT_INITED)) { + recv_resp_msg = &dev->pf_to_mgmt->recv_resp_msg_from_mgmt; + if (dev->pf_to_mgmt->event_flag == SEND_EVENT_START) { + complete(&recv_resp_msg->recv_done); + 
dev->pf_to_mgmt->event_flag = SEND_EVENT_TIMEOUT; + } + } + + /* only flush sync cmdq to avoid blocking remove */ + if (hifc_is_hwdev_mod_inited(dev, HIFC_HWDEV_CMDQ_INITED)) + hifc_cmdq_flush_cmd(hwdev, + &dev->cmdqs->cmdq[HIFC_CMDQ_SYNC]); + + clear_bit(HIFC_HWDEV_STATE_BUSY, &dev->func_state); +} + +void hifc_detect_hw_present(void *hwdev) +{ + u32 addr, attr1; + + addr = HIFC_CSR_FUNC_ATTR1_ADDR; + attr1 = hifc_hwif_read_reg(((struct hifc_hwdev *)hwdev)->hwif, addr); + if (attr1 == HIFC_PCIE_LINK_DOWN) { + hifc_set_chip_absent(hwdev); + hifc_force_complete_all(hwdev); + } +} + +void hifc_record_pcie_error(void *hwdev) +{ + struct hifc_hwdev *dev = (struct hifc_hwdev *)hwdev; + + if (!hwdev) + return; + + atomic_inc(&dev->hw_stats.fault_event_stats.pcie_fault_stats); +} + +static inline void __set_heartbeat_ehd_detect_delay(struct hifc_hwdev *hwdev, + u32 delay_ms) +{ + hwdev->heartbeat_ehd.start_detect_jiffies = + jiffies + msecs_to_jiffies(delay_ms); +} + +static int __pf_to_mgmt_pre_handle(struct hifc_hwdev *hwdev, + enum hifc_mod_type mod, u8 cmd) +{ + if (hifc_get_mgmt_channel_status(hwdev)) { + if (mod == HIFC_MOD_COMM || mod == HIFC_MOD_L2NIC) + return HIFC_DEV_BUSY_ACTIVE_FW; + else + return -EBUSY; + } + + /* Set channel invalid, don't allowed to send other cmd */ + if (mod == HIFC_MOD_COMM && cmd == HIFC_MGMT_CMD_ACTIVATE_FW) { + hifc_set_mgmt_channel_status(hwdev, true); + /* stop heartbeat enhanced detection temporary, and will + * restart in firmware active event when mgmt is resetted + */ + __set_heartbeat_ehd_detect_delay(hwdev, + HIFC_DEV_ACTIVE_FW_TIMEOUT); + } + + return 0; +} + +static void __pf_to_mgmt_after_handle(struct hifc_hwdev *hwdev, + enum hifc_mod_type mod, u8 cmd, + int sw_status, void *mgmt_status) +{ + /* if activate fw is failed, set channel valid */ + if (mod == HIFC_MOD_COMM && + cmd == HIFC_MGMT_CMD_ACTIVATE_FW) { + if (sw_status) + hifc_set_mgmt_channel_status(hwdev, false); + else + hifc_enable_mgmt_channel(hwdev, mgmt_status); + } +} + +int hifc_pf_msg_to_mgmt_sync(void *hwdev, enum hifc_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout) +{ + int err; + + if (!hwdev) + return -EINVAL; + + if (!((struct hifc_hwdev *)hwdev)->chip_present_flag) + return -EPERM; + + if (!hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_MGMT_INITED)) + return -EPERM; + + if (in_size > HIFC_MSG_TO_MGMT_MAX_LEN) + return -EINVAL; + + err = __pf_to_mgmt_pre_handle(hwdev, mod, cmd); + if (err) + return err; + + err = hifc_pf_to_mgmt_sync(hwdev, mod, cmd, buf_in, in_size, + buf_out, out_size, timeout); + __pf_to_mgmt_after_handle(hwdev, mod, cmd, err, buf_out); + + return err; +} + +static bool is_sfp_info_cmd_cached(struct hifc_hwdev *hwdev, + enum hifc_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hifc_cmd_get_sfp_qsfp_info *sfp_info; + struct hifc_port_routine_cmd *rt_cmd; + struct card_node *chip_node = hwdev->chip_node; + + sfp_info = buf_in; + if (!chip_node->rt_cmd || sfp_info->port_id >= HIFC_MAX_PORT_ID || + *out_size < sizeof(*sfp_info)) + return false; + + if (sfp_info->version == HIFC_GET_SFP_INFO_REAL_TIME) + return false; + + rt_cmd = &chip_node->rt_cmd[sfp_info->port_id]; + mutex_lock(&chip_node->sfp_mutex); + memcpy(buf_out, &rt_cmd->sfp_info, sizeof(*sfp_info)); + mutex_unlock(&chip_node->sfp_mutex); + + return true; +} + +static bool is_sfp_abs_cmd_cached(struct hifc_hwdev *hwdev, + enum hifc_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 
*out_size) +{ + struct hifc_cmd_get_light_module_abs *abs; + struct hifc_port_routine_cmd *rt_cmd; + struct card_node *chip_node = hwdev->chip_node; + + abs = buf_in; + if (!chip_node->rt_cmd || abs->port_id >= HIFC_MAX_PORT_ID || + *out_size < sizeof(*abs)) + return false; + + if (abs->version == HIFC_GET_SFP_INFO_REAL_TIME) + return false; + + rt_cmd = &chip_node->rt_cmd[abs->port_id]; + mutex_lock(&chip_node->sfp_mutex); + memcpy(buf_out, &rt_cmd->abs, sizeof(*abs)); + mutex_unlock(&chip_node->sfp_mutex); + + return true; +} + +static bool driver_processed_cmd(struct hifc_hwdev *hwdev, + enum hifc_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct card_node *chip_node = hwdev->chip_node; + + if (mod == HIFC_MOD_L2NIC) { + if (cmd == HIFC_PORT_CMD_GET_SFP_INFO && + chip_node->rt_cmd->up_send_sfp_info) { + return is_sfp_info_cmd_cached(hwdev, mod, cmd, buf_in, + in_size, buf_out, + out_size); + } else if (cmd == HIFC_PORT_CMD_GET_SFP_ABS && + chip_node->rt_cmd->up_send_sfp_abs) { + return is_sfp_abs_cmd_cached(hwdev, mod, cmd, buf_in, + in_size, buf_out, + out_size); + } + } + + return false; +} + +static int send_sync_mgmt_msg(void *hwdev, enum hifc_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout) +{ + unsigned long end; + + end = jiffies + msecs_to_jiffies(HIFC_DEV_ACTIVE_FW_TIMEOUT); + do { + if (!hifc_get_mgmt_channel_status(hwdev) || + !hifc_get_chip_present_flag(hwdev)) + break; + + msleep(1000); + } while (time_before(jiffies, end)); + + if (driver_processed_cmd(hwdev, mod, cmd, buf_in, in_size, buf_out, + out_size)) + return 0; + + return hifc_pf_msg_to_mgmt_sync(hwdev, mod, cmd, buf_in, in_size, + buf_out, out_size, timeout); +} + +int hifc_msg_to_mgmt_sync(void *hwdev, enum hifc_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout) +{ + struct hifc_hwdev *dev = hwdev; + int err; + + if (!hwdev) + return -EINVAL; + + if (!(dev->chip_present_flag)) + return -EPERM; + + err = send_sync_mgmt_msg(hwdev, mod, cmd, buf_in, in_size, + buf_out, out_size, timeout); + + hifc_print_status_info(hwdev, mod, cmd, buf_out); + + return err; +} + +/* PF/VF send msg to uP by api cmd, and return immediately */ +int hifc_msg_to_mgmt_async(void *hwdev, enum hifc_mod_type mod, u8 cmd, + void *buf_in, u16 in_size) +{ + int err; + + if (!hwdev) + return -EINVAL; + + if (!(((struct hifc_hwdev *)hwdev)->chip_present_flag) || + !hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_MGMT_INITED) || + hifc_get_mgmt_channel_status(hwdev)) + return -EPERM; + + if (hifc_func_type(hwdev) == TYPE_VF) { + err = -EFAULT; + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Mailbox don't support async cmd\n"); + } else { + err = hifc_pf_to_mgmt_async(hwdev, mod, cmd, buf_in, in_size); + } + + return err; +} + +int hifc_msg_to_mgmt_no_ack(void *hwdev, enum hifc_mod_type mod, u8 cmd, + void *buf_in, u16 in_size) +{ + struct hifc_hwdev *dev = hwdev; + int err; + + if (!hwdev) + return -EINVAL; + + if (!(dev->chip_present_flag)) + return -EPERM; + + err = hifc_pf_to_mgmt_no_ack(hwdev, mod, cmd, buf_in, in_size); + + return err; +} + +/** + * hifc_cpu_to_be32 - convert data to big endian 32 bit format + * @data: the data to convert + * @len: length of data to convert, must be Multiple of 4B + **/ +void hifc_cpu_to_be32(void *data, int len) +{ + int i, chunk_sz = sizeof(u32); + u32 *mem = data; + + if (!data) + return; + + len = len / chunk_sz; + + for (i = 0; i < len; i++) { + *mem = cpu_to_be32(*mem); 
+ mem++; + } +} + +/** + * hifc_cpu_to_be32 - convert data from big endian 32 bit format + * @data: the data to convert + * @len: length of data to convert + **/ +void hifc_be32_to_cpu(void *data, int len) +{ + int i, chunk_sz = sizeof(u32); + u32 *mem = data; + + if (!data) + return; + + len = len / chunk_sz; + + for (i = 0; i < len; i++) { + *mem = be32_to_cpu(*mem); + mem++; + } +} + +/** + * hifc_set_sge - set dma area in scatter gather entry + * @sge: scatter gather entry + * @addr: dma address + * @len: length of relevant data in the dma address + **/ +void hifc_set_sge(struct hifc_sge *sge, dma_addr_t addr, u32 len) +{ + sge->hi_addr = upper_32_bits(addr); + sge->lo_addr = lower_32_bits(addr); + sge->len = len; +} + +int hifc_set_ci_table(void *hwdev, u16 q_id, struct hifc_sq_attr *attr) +{ + struct hifc_cons_idx_attr cons_idx_attr = {0}; + u16 out_size = sizeof(cons_idx_attr); + int err; + + if (!hwdev || !attr) + return -EINVAL; + + err = hifc_global_func_id_get(hwdev, &cons_idx_attr.func_idx); + if (err) + return err; + + cons_idx_attr.dma_attr_off = attr->dma_attr_off; + cons_idx_attr.pending_limit = attr->pending_limit; + cons_idx_attr.coalescing_time = attr->coalescing_time; + + if (attr->intr_en) { + cons_idx_attr.intr_en = attr->intr_en; + cons_idx_attr.intr_idx = attr->intr_idx; + } + + cons_idx_attr.l2nic_sqn = attr->l2nic_sqn; + cons_idx_attr.sq_id = q_id; + + cons_idx_attr.ci_addr = attr->ci_dma_base; + + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_L2NIC_SQ_CI_ATTR_SET, + &cons_idx_attr, sizeof(cons_idx_attr), + &cons_idx_attr, &out_size, 0); + if (err || !out_size || cons_idx_attr.status) { + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Failed to set ci attribute table, err: %d, status: 0x%x, out_size: 0x%x\n", + err, cons_idx_attr.status, out_size); + return -EFAULT; + } + + return 0; +} + +static int hifc_set_cmdq_depth(struct hifc_hwdev *hwdev, u16 cmdq_depth) +{ + struct hifc_root_ctxt root_ctxt = {0}; + u16 out_size = sizeof(root_ctxt); + int err; + + err = hifc_global_func_id_get(hwdev, &root_ctxt.func_idx); + if (err) + return err; + + root_ctxt.ppf_idx = hifc_ppf_idx(hwdev); + + root_ctxt.set_cmdq_depth = 1; + root_ctxt.cmdq_depth = (u8)ilog2(cmdq_depth); + + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_VAT_SET, + &root_ctxt, sizeof(root_ctxt), + &root_ctxt, &out_size, 0); + if (err || !out_size || root_ctxt.status) { + sdk_err(hwdev->dev_hdl, "Failed to set cmdq depth, err: %d, status: 0x%x, out_size: 0x%x\n", + err, root_ctxt.status, out_size); + return -EFAULT; + } + + return 0; +} + +static u16 get_hw_rx_buf_size(int rx_buf_sz) +{ +#define DEFAULT_RX_BUF_SIZE ((u16)0xB) + u16 num_hw_types = + sizeof(hifc_hw_rx_buf_size) / + sizeof(hifc_hw_rx_buf_size[0]); + u16 i; + + for (i = 0; i < num_hw_types; i++) { + if (hifc_hw_rx_buf_size[i] == rx_buf_sz) + return i; + } + + pr_err("Chip can't support rx buf size of %d\n", rx_buf_sz); + + return DEFAULT_RX_BUF_SIZE; +} + +int hifc_set_root_ctxt(void *hwdev, u16 rq_depth, u16 sq_depth, int rx_buf_sz) +{ + struct hifc_root_ctxt root_ctxt = {0}; + u16 out_size = sizeof(root_ctxt); + int err; + + if (!hwdev) + return -EINVAL; + + err = hifc_global_func_id_get(hwdev, &root_ctxt.func_idx); + if (err) + return err; + + root_ctxt.ppf_idx = hifc_ppf_idx(hwdev); + + root_ctxt.set_cmdq_depth = 0; + root_ctxt.cmdq_depth = 0; + + root_ctxt.lro_en = 1; + + root_ctxt.rq_depth = (u16)ilog2(rq_depth); + root_ctxt.rx_buf_sz = get_hw_rx_buf_size(rx_buf_sz); + root_ctxt.sq_depth = 
(u16)ilog2(sq_depth); + + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_VAT_SET, + &root_ctxt, sizeof(root_ctxt), + &root_ctxt, &out_size, 0); + if (err || !out_size || root_ctxt.status) { + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Failed to set root context, err: %d, status: 0x%x, out_size: 0x%x\n", + err, root_ctxt.status, out_size); + return -EFAULT; + } + + return 0; +} + +int hifc_clean_root_ctxt(void *hwdev) +{ + struct hifc_root_ctxt root_ctxt = {0}; + u16 out_size = sizeof(root_ctxt); + int err; + + if (!hwdev) + return -EINVAL; + + err = hifc_global_func_id_get(hwdev, &root_ctxt.func_idx); + if (err) + return err; + + root_ctxt.ppf_idx = hifc_ppf_idx(hwdev); + + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_VAT_SET, + &root_ctxt, sizeof(root_ctxt), + &root_ctxt, &out_size, 0); + if (err || !out_size || root_ctxt.status) { + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Failed to set root context, err: %d, status: 0x%x, out_size: 0x%x\n", + err, root_ctxt.status, out_size); + return -EFAULT; + } + + return 0; +} + +static int wait_for_flr_finish(struct hifc_hwif *hwif) +{ + u32 cnt = 0; + enum hifc_pf_status status; + + while (cnt < HIFC_FLR_TIMEOUT) { + status = hifc_get_pf_status(hwif); + if (status == HIFC_PF_STATUS_FLR_FINISH_FLAG) { + hifc_set_pf_status(hwif, HIFC_PF_STATUS_ACTIVE_FLAG); + return 0; + } + + usleep_range(9900, 10000); + cnt++; + } + + return -EFAULT; +} + +#define HIFC_WAIT_CMDQ_IDLE_TIMEOUT 5000 + +static int wait_cmdq_stop(struct hifc_hwdev *hwdev) +{ + enum hifc_cmdq_type cmdq_type; + struct hifc_cmdqs *cmdqs = hwdev->cmdqs; + u32 cnt = 0; + int err = 0; + + if (!(cmdqs->status & HIFC_CMDQ_ENABLE)) + return 0; + + cmdqs->status &= ~HIFC_CMDQ_ENABLE; + + while (cnt < HIFC_WAIT_CMDQ_IDLE_TIMEOUT && hwdev->chip_present_flag) { + err = 0; + cmdq_type = HIFC_CMDQ_SYNC; + for (; cmdq_type < HIFC_MAX_CMDQ_TYPES; cmdq_type++) { + if (!hifc_cmdq_idle(&cmdqs->cmdq[cmdq_type])) { + err = -EBUSY; + break; + } + } + + if (!err) + return 0; + + usleep_range(500, 1000); + cnt++; + } + + cmdq_type = HIFC_CMDQ_SYNC; + for (; cmdq_type < HIFC_MAX_CMDQ_TYPES; cmdq_type++) { + if (!hifc_cmdq_idle(&cmdqs->cmdq[cmdq_type])) + sdk_err(hwdev->dev_hdl, "Cmdq %d busy\n", cmdq_type); + } + + cmdqs->status |= HIFC_CMDQ_ENABLE; + + return err; +} + +static int hifc_pf_rx_tx_flush(struct hifc_hwdev *hwdev) +{ + struct hifc_hwif *hwif = hwdev->hwif; + struct hifc_clear_doorbell clear_db = {0}; + struct hifc_clear_resource clr_res = {0}; + u16 out_size, func_id; + int err; + int ret = 0; + + /* wait ucode stop I/O */ + msleep(100); + + err = wait_cmdq_stop(hwdev); + if (err) { + sdk_warn(hwdev->dev_hdl, "CMDQ is still working, please check CMDQ timeout value is reasonable\n"); + ret = err; + } + + hifc_disable_doorbell(hwif); + + out_size = sizeof(clear_db); + func_id = hifc_global_func_id_hw(hwdev); + clear_db.func_idx = func_id; + clear_db.ppf_idx = HIFC_HWIF_PPF_IDX(hwif); + + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_FLUSH_DOORBELL, &clear_db, + sizeof(clear_db), &clear_db, &out_size, 0); + if (err || !out_size || clear_db.status) { + sdk_warn(hwdev->dev_hdl, "Failed to flush doorbell, err: %d, status: 0x%x, out_size: 0x%x\n", + err, clear_db.status, out_size); + if (err) + ret = err; + else + ret = -EFAULT; + } + + hifc_set_pf_status(hwif, HIFC_PF_STATUS_FLR_START_FLAG); + + clr_res.func_idx = func_id; + clr_res.ppf_idx = HIFC_HWIF_PPF_IDX(hwif); + + err = hifc_msg_to_mgmt_no_ack(hwdev, HIFC_MOD_COMM, + 
HIFC_MGMT_CMD_START_FLR, &clr_res, + sizeof(clr_res)); + if (err) { + sdk_warn(hwdev->dev_hdl, "Failed to notice flush message\n"); + ret = err; + } + + err = wait_for_flr_finish(hwif); + if (err) { + sdk_warn(hwdev->dev_hdl, "Wait firmware FLR timeout\n"); + ret = err; + } + + hifc_enable_doorbell(hwif); + + err = hifc_reinit_cmdq_ctxts(hwdev); + if (err) { + sdk_warn(hwdev->dev_hdl, "Failed to reinit cmdq\n"); + ret = err; + } + + return ret; +} + +int hifc_func_rx_tx_flush(void *hwdev) +{ + struct hifc_hwdev *dev = hwdev; + + if (!hwdev) + return -EINVAL; + + if (!dev->chip_present_flag) + return 0; + + return hifc_pf_rx_tx_flush(dev); +} + +int hifc_get_interrupt_cfg(void *hwdev, + struct nic_interrupt_info *interrupt_info) +{ + struct hifc_hwdev *nic_hwdev = hwdev; + struct hifc_msix_config msix_cfg = {0}; + u16 out_size = sizeof(msix_cfg); + int err; + + if (!hwdev || !interrupt_info) + return -EINVAL; + + err = hifc_global_func_id_get(hwdev, &msix_cfg.func_id); + if (err) + return err; + + msix_cfg.msix_index = interrupt_info->msix_index; + + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_MSI_CTRL_REG_RD_BY_UP, + &msix_cfg, sizeof(msix_cfg), + &msix_cfg, &out_size, 0); + if (err || !out_size || msix_cfg.status) { + sdk_err(nic_hwdev->dev_hdl, "Failed to get interrupt config, err: %d, status: 0x%x, out size: 0x%x\n", + err, msix_cfg.status, out_size); + return -EINVAL; + } + + interrupt_info->lli_credit_limit = msix_cfg.lli_credit_cnt; + interrupt_info->lli_timer_cfg = msix_cfg.lli_tmier_cnt; + interrupt_info->pending_limt = msix_cfg.pending_cnt; + interrupt_info->coalesc_timer_cfg = msix_cfg.coalesct_timer_cnt; + interrupt_info->resend_timer_cfg = msix_cfg.resend_timer_cnt; + + return 0; +} + +int hifc_set_interrupt_cfg(void *hwdev, + struct nic_interrupt_info interrupt_info) +{ + struct hifc_hwdev *nic_hwdev = hwdev; + struct hifc_msix_config msix_cfg = {0}; + struct nic_interrupt_info temp_info; + u16 out_size = sizeof(msix_cfg); + int err; + + if (!hwdev) + return -EINVAL; + + temp_info.msix_index = interrupt_info.msix_index; + + err = hifc_get_interrupt_cfg(hwdev, &temp_info); + if (err) + return -EINVAL; + + err = hifc_global_func_id_get(hwdev, &msix_cfg.func_id); + if (err) + return err; + + msix_cfg.msix_index = (u16)interrupt_info.msix_index; + msix_cfg.lli_credit_cnt = temp_info.lli_credit_limit; + msix_cfg.lli_tmier_cnt = temp_info.lli_timer_cfg; + msix_cfg.pending_cnt = temp_info.pending_limt; + msix_cfg.coalesct_timer_cnt = temp_info.coalesc_timer_cfg; + msix_cfg.resend_timer_cnt = temp_info.resend_timer_cfg; + + if (interrupt_info.lli_set) { + msix_cfg.lli_credit_cnt = interrupt_info.lli_credit_limit; + msix_cfg.lli_tmier_cnt = interrupt_info.lli_timer_cfg; + } + + if (interrupt_info.interrupt_coalesc_set) { + msix_cfg.pending_cnt = interrupt_info.pending_limt; + msix_cfg.coalesct_timer_cnt = interrupt_info.coalesc_timer_cfg; + msix_cfg.resend_timer_cnt = interrupt_info.resend_timer_cfg; + } + + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_MSI_CTRL_REG_WR_BY_UP, + &msix_cfg, sizeof(msix_cfg), + &msix_cfg, &out_size, 0); + if (err || !out_size || msix_cfg.status) { + sdk_err(nic_hwdev->dev_hdl, "Failed to set interrupt config, err: %d, status: 0x%x, out size: 0x%x\n", + err, msix_cfg.status, out_size); + return -EINVAL; + } + + return 0; +} + +#define HIFC_MSIX_CNT_RESEND_TIMER_SHIFT 29 +#define HIFC_MSIX_CNT_RESEND_TIMER_MASK 0x7U + +#define HIFC_MSIX_CNT_SET(val, member) \ + (((val) & HIFC_MSIX_CNT_##member##_MASK) << \ + 
HIFC_MSIX_CNT_##member##_SHIFT) + +void hifc_misx_intr_clear_resend_bit(void *hwdev, u16 msix_idx, + u8 clear_resend_en) +{ + struct hifc_hwif *hwif; + u32 msix_ctrl = 0, addr; + + if (!hwdev) + return; + + hwif = ((struct hifc_hwdev *)hwdev)->hwif; + + msix_ctrl = HIFC_MSIX_CNT_SET(clear_resend_en, RESEND_TIMER); + + addr = HIFC_CSR_MSIX_CNT_ADDR(msix_idx); + + hifc_hwif_write_reg(hwif, addr, msix_ctrl); +} + +static int init_aeqs_msix_attr(struct hifc_hwdev *hwdev) +{ + struct hifc_aeqs *aeqs = hwdev->aeqs; + struct nic_interrupt_info info = {0}; + struct hifc_eq *eq; + u16 q_id; + int err; + + info.lli_set = 0; + info.interrupt_coalesc_set = 1; + info.pending_limt = HIFC_DEAULT_EQ_MSIX_PENDING_LIMIT; + info.coalesc_timer_cfg = HIFC_DEAULT_EQ_MSIX_COALESC_TIMER_CFG; + info.resend_timer_cfg = HIFC_DEAULT_EQ_MSIX_RESEND_TIMER_CFG; + + for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) { + eq = &aeqs->aeq[q_id]; + info.msix_index = eq->eq_irq.msix_entry_idx; + err = hifc_set_interrupt_cfg(hwdev, info); + if (err) { + sdk_err(hwdev->dev_hdl, "Set msix attr for aeq %d failed\n", + q_id); + return -EFAULT; + } + } + + return 0; +} + +static int init_ceqs_msix_attr(struct hifc_hwdev *hwdev) +{ + struct hifc_ceqs *ceqs = hwdev->ceqs; + struct nic_interrupt_info info = {0}; + struct hifc_eq *eq; + u16 q_id; + int err; + + info.lli_set = 0; + info.interrupt_coalesc_set = 1; + info.pending_limt = HIFC_DEAULT_EQ_MSIX_PENDING_LIMIT; + info.coalesc_timer_cfg = HIFC_DEAULT_EQ_MSIX_COALESC_TIMER_CFG; + info.resend_timer_cfg = HIFC_DEAULT_EQ_MSIX_RESEND_TIMER_CFG; + + for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) { + eq = &ceqs->ceq[q_id]; + info.msix_index = eq->eq_irq.msix_entry_idx; + err = hifc_set_interrupt_cfg(hwdev, info); + if (err) { + sdk_err(hwdev->dev_hdl, "Set msix attr for ceq %d failed\n", + q_id); + return -EFAULT; + } + } + + return 0; +} + +/** + * set_pf_dma_attr_entry - set the dma attributes for entry + * @hwdev: the pointer to hw device + * @entry_idx: the entry index in the dma table + * @st: PCIE TLP steering tag + * @at: PCIE TLP AT field + * @ph: PCIE TLP Processing Hint field + * @no_snooping: PCIE TLP No snooping + * @tph_en: PCIE TLP Processing Hint Enable + **/ +static void set_pf_dma_attr_entry(struct hifc_hwdev *hwdev, u32 entry_idx, + u8 st, u8 at, u8 ph, + enum hifc_pcie_nosnoop no_snooping, + enum hifc_pcie_tph tph_en) +{ + u32 addr, val, dma_attr_entry; + + /* Read Modify Write */ + addr = HIFC_CSR_DMA_ATTR_TBL_ADDR(entry_idx); + + val = hifc_hwif_read_reg(hwdev->hwif, addr); + val = HIFC_DMA_ATTR_ENTRY_CLEAR(val, ST) & + HIFC_DMA_ATTR_ENTRY_CLEAR(val, AT) & + HIFC_DMA_ATTR_ENTRY_CLEAR(val, PH) & + HIFC_DMA_ATTR_ENTRY_CLEAR(val, NO_SNOOPING) & + HIFC_DMA_ATTR_ENTRY_CLEAR(val, TPH_EN); + + dma_attr_entry = HIFC_DMA_ATTR_ENTRY_SET(st, ST) | + HIFC_DMA_ATTR_ENTRY_SET(at, AT) | + HIFC_DMA_ATTR_ENTRY_SET(ph, PH) | + HIFC_DMA_ATTR_ENTRY_SET(no_snooping, NO_SNOOPING) | + HIFC_DMA_ATTR_ENTRY_SET(tph_en, TPH_EN); + + val |= dma_attr_entry; + hifc_hwif_write_reg(hwdev->hwif, addr, val); +} + +/** + * dma_attr_table_init - initialize the the default dma attributes + * @hwdev: the pointer to hw device + * Return: 0 - success, negative - failure + **/ +static int dma_attr_table_init(struct hifc_hwdev *hwdev) +{ + int err = 0; + + set_pf_dma_attr_entry(hwdev, PCIE_MSIX_ATTR_ENTRY, + HIFC_PCIE_ST_DISABLE, + HIFC_PCIE_AT_DISABLE, + HIFC_PCIE_PH_DISABLE, + HIFC_PCIE_SNOOP, + HIFC_PCIE_TPH_DISABLE); + + return err; +} + +static int resources_state_set(struct hifc_hwdev *hwdev, + enum 
hifc_res_state state) +{ + struct hifc_cmd_set_res_state res_state = {0}; + u16 out_size = sizeof(res_state); + int err; + + err = hifc_global_func_id_get(hwdev, &res_state.func_idx); + if (err) + return err; + + res_state.state = state; + + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_RES_STATE_SET, + &res_state, sizeof(res_state), + &res_state, &out_size, 0); + if (err || !out_size || res_state.status) { + sdk_err(hwdev->dev_hdl, "Failed to set resources state, err: %d, status: 0x%x, out_size: 0x%x\n", + err, res_state.status, out_size); + return -EFAULT; + } + + return 0; +} + +static void comm_mgmt_msg_handler(void *hwdev, void *pri_handle, u8 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size) +{ + struct hifc_msg_pf_to_mgmt *pf_to_mgmt = pri_handle; + u8 cmd_idx; + u32 *mem; + u16 i; + + for (cmd_idx = 0; cmd_idx < pf_to_mgmt->proc.cmd_num; cmd_idx++) { + if (cmd == pf_to_mgmt->proc.info[cmd_idx].cmd) { + if (!pf_to_mgmt->proc.info[cmd_idx].proc) { + sdk_warn(pf_to_mgmt->hwdev->dev_hdl, + "PF recv up comm msg handle null, cmd(0x%x)\n", + cmd); + } else { + pf_to_mgmt->proc.info[cmd_idx].proc(hwdev, + buf_in, in_size, buf_out, out_size); + } + + return; + } + } + + sdk_warn(pf_to_mgmt->hwdev->dev_hdl, "Received mgmt cpu event: 0x%x\n", + cmd); + + mem = buf_in; + for (i = 0; i < (in_size / sizeof(u32)); i++) { + pr_info("0x%x\n", *mem); + mem++; + } + + *out_size = 0; +} + +static int hifc_comm_aeqs_init(struct hifc_hwdev *hwdev) +{ + struct irq_info aeq_irqs[HIFC_MAX_AEQS] = {{0} }; + u16 num_aeqs, resp_num_irq = 0, i; + int err; + + num_aeqs = HIFC_HWIF_NUM_AEQS(hwdev->hwif); + if (num_aeqs > HIFC_MAX_AEQS) { + sdk_warn(hwdev->dev_hdl, "Adjust aeq num to %d\n", + HIFC_MAX_AEQS); + num_aeqs = HIFC_MAX_AEQS; + } + err = hifc_alloc_irqs(hwdev, SERVICE_T_INTF, num_aeqs, aeq_irqs, + &resp_num_irq); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to alloc aeq irqs, num_aeqs: %d\n", + num_aeqs); + return err; + } + + if (resp_num_irq < num_aeqs) { + sdk_warn(hwdev->dev_hdl, "Adjust aeq num to %d\n", + resp_num_irq); + num_aeqs = resp_num_irq; + } + + err = hifc_aeqs_init(hwdev, num_aeqs, aeq_irqs); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init aeqs\n"); + goto aeqs_init_err; + } + + set_bit(HIFC_HWDEV_AEQ_INITED, &hwdev->func_state); + + return 0; + +aeqs_init_err: + for (i = 0; i < num_aeqs; i++) + hifc_free_irq(hwdev, SERVICE_T_INTF, aeq_irqs[i].irq_id); + + return err; +} + +static void hifc_comm_aeqs_free(struct hifc_hwdev *hwdev) +{ + struct irq_info aeq_irqs[HIFC_MAX_AEQS] = {{0} }; + u16 num_irqs, i; + + clear_bit(HIFC_HWDEV_AEQ_INITED, &hwdev->func_state); + + hifc_get_aeq_irqs(hwdev, aeq_irqs, &num_irqs); + hifc_aeqs_free(hwdev); + for (i = 0; i < num_irqs; i++) + hifc_free_irq(hwdev, SERVICE_T_INTF, aeq_irqs[i].irq_id); +} + +static int hifc_comm_ceqs_init(struct hifc_hwdev *hwdev) +{ + struct irq_info ceq_irqs[HIFC_MAX_CEQS] = {{0} }; + u16 num_ceqs, resp_num_irq = 0, i; + int err; + + num_ceqs = HIFC_HWIF_NUM_CEQS(hwdev->hwif); + if (num_ceqs > HIFC_MAX_CEQS) { + sdk_warn(hwdev->dev_hdl, "Adjust ceq num to %d\n", + HIFC_MAX_CEQS); + num_ceqs = HIFC_MAX_CEQS; + } + + err = hifc_alloc_irqs(hwdev, SERVICE_T_INTF, num_ceqs, ceq_irqs, + &resp_num_irq); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to alloc ceq irqs, num_ceqs: %d\n", + num_ceqs); + return err; + } + + if (resp_num_irq < num_ceqs) { + sdk_warn(hwdev->dev_hdl, "Adjust ceq num to %d\n", + resp_num_irq); + num_ceqs = resp_num_irq; + } + + err = hifc_ceqs_init(hwdev, num_ceqs, 
ceq_irqs); + if (err) { + sdk_err(hwdev->dev_hdl, + "Failed to init ceqs, err:%d\n", err); + goto ceqs_init_err; + } + + return 0; + +ceqs_init_err: + for (i = 0; i < num_ceqs; i++) + hifc_free_irq(hwdev, SERVICE_T_INTF, ceq_irqs[i].irq_id); + + return err; +} + +static void hifc_comm_ceqs_free(struct hifc_hwdev *hwdev) +{ + struct irq_info ceq_irqs[HIFC_MAX_CEQS] = {{0} }; + u16 num_irqs; + int i; + + hifc_get_ceq_irqs(hwdev, ceq_irqs, &num_irqs); + hifc_ceqs_free(hwdev); + for (i = 0; i < num_irqs; i++) + hifc_free_irq(hwdev, SERVICE_T_INTF, ceq_irqs[i].irq_id); +} + +static int hifc_comm_pf_to_mgmt_init(struct hifc_hwdev *hwdev) +{ + int err; + + if (hifc_func_type(hwdev) == TYPE_VF || + !FUNC_SUPPORT_MGMT(hwdev)) + return 0; /* VF do not support send msg to mgmt directly */ + + err = hifc_pf_to_mgmt_init(hwdev); + if (err) + return err; + + hifc_aeq_register_hw_cb(hwdev, HIFC_MSG_FROM_MGMT_CPU, + hifc_mgmt_msg_aeqe_handler); + + hifc_register_mgmt_msg_cb(hwdev, HIFC_MOD_COMM, + hwdev->pf_to_mgmt, comm_mgmt_msg_handler); + + set_bit(HIFC_HWDEV_MGMT_INITED, &hwdev->func_state); + + return 0; +} + +static void hifc_comm_pf_to_mgmt_free(struct hifc_hwdev *hwdev) +{ + if (hifc_func_type(hwdev) == TYPE_VF || + !FUNC_SUPPORT_MGMT(hwdev)) + return; /* VF do not support send msg to mgmt directly */ + + hifc_unregister_mgmt_msg_cb(hwdev, HIFC_MOD_COMM); + + hifc_aeq_unregister_hw_cb(hwdev, HIFC_MSG_FROM_MGMT_CPU); + + hifc_pf_to_mgmt_free(hwdev); +} + +static int hifc_comm_clp_to_mgmt_init(struct hifc_hwdev *hwdev) +{ + int err; + + if (hifc_func_type(hwdev) == TYPE_VF || + !FUNC_SUPPORT_MGMT(hwdev)) + return 0; + + err = hifc_clp_pf_to_mgmt_init(hwdev); + if (err) + return err; + + set_bit(HIFC_HWDEV_CLP_INITED, &hwdev->func_state); + + return 0; +} + +static void hifc_comm_clp_to_mgmt_free(struct hifc_hwdev *hwdev) +{ + if (hifc_func_type(hwdev) == TYPE_VF || + !FUNC_SUPPORT_MGMT(hwdev)) + return; + + clear_bit(HIFC_HWDEV_CLP_INITED, &hwdev->func_state); + hifc_clp_pf_to_mgmt_free(hwdev); +} + +static int hifc_comm_cmdqs_init(struct hifc_hwdev *hwdev) +{ + int err; + + err = hifc_cmdqs_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init cmd queues\n"); + return err; + } + + hifc_ceq_register_cb(hwdev, HIFC_CMDQ, hifc_cmdq_ceq_handler); + + err = hifc_set_cmdq_depth(hwdev, HIFC_CMDQ_DEPTH); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to set cmdq depth\n"); + goto set_cmdq_depth_err; + } + + return 0; + +set_cmdq_depth_err: + hifc_cmdqs_free(hwdev); + + return err; +} + +static void hifc_comm_cmdqs_free(struct hifc_hwdev *hwdev) +{ + hifc_ceq_unregister_cb(hwdev, HIFC_CMDQ); + hifc_cmdqs_free(hwdev); +} + +static int hifc_sync_mgmt_func_state(struct hifc_hwdev *hwdev) +{ + int err; + + hifc_set_pf_status(hwdev->hwif, HIFC_PF_STATUS_ACTIVE_FLAG); + + err = resources_state_set(hwdev, HIFC_RES_ACTIVE); + if (err) { + sdk_err(hwdev->dev_hdl, + "Failed to set function resources state\n"); + goto resources_state_set_err; + } + + hwdev->heartbeat_ehd.en = false; + if (HIFC_FUNC_TYPE(hwdev) == TYPE_PPF) { + /* heartbeat synchronize must be after set pf active status */ + hifc_comm_recv_mgmt_self_cmd_reg( + hwdev, HIFC_MGMT_CMD_HEARTBEAT_EVENT, + mgmt_heartbeat_event_handler); + } + + return 0; + +resources_state_set_err: + hifc_set_pf_status(hwdev->hwif, HIFC_PF_STATUS_INIT); + + return err; +} + +static void hifc_unsync_mgmt_func_state(struct hifc_hwdev *hwdev) +{ + hifc_set_pf_status(hwdev->hwif, HIFC_PF_STATUS_INIT); + + hwdev->heartbeat_ehd.en = false; + if (HIFC_FUNC_TYPE(hwdev) 
== TYPE_PPF) { + hifc_comm_recv_up_self_cmd_unreg( + hwdev, HIFC_MGMT_CMD_HEARTBEAT_EVENT); + } + + resources_state_set(hwdev, HIFC_RES_CLEAN); +} + +int hifc_set_vport_enable(void *hwdev, bool enable) +{ + struct hifc_hwdev *nic_hwdev = (struct hifc_hwdev *)hwdev; + struct hifc_vport_state en_state = {0}; + u16 out_size = sizeof(en_state); + int err; + + if (!hwdev) + return -EINVAL; + + err = hifc_global_func_id_get(hwdev, &en_state.func_id); + if (err) + return err; + + en_state.state = enable ? 1 : 0; + + err = l2nic_msg_to_mgmt_sync(hwdev, HIFC_PORT_CMD_SET_VPORT_ENABLE, + &en_state, sizeof(en_state), + &en_state, &out_size); + if (err || !out_size || en_state.status) { + sdk_err(nic_hwdev->dev_hdl, "Failed to set vport state, err: %d, status: 0x%x, out size: 0x%x\n", + err, en_state.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hifc_l2nic_reset_base(struct hifc_hwdev *hwdev, u16 reset_flag) +{ + struct hifc_l2nic_reset l2nic_reset = {0}; + u16 out_size = sizeof(l2nic_reset); + int err = 0; + + err = hifc_set_vport_enable(hwdev, false); + if (err) + return err; + + msleep(100); + + sdk_info(hwdev->dev_hdl, "L2nic reset flag 0x%x\n", reset_flag); + + err = hifc_global_func_id_get(hwdev, &l2nic_reset.func_id); + if (err) + return err; + + l2nic_reset.reset_flag = reset_flag; + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_L2NIC_RESET, &l2nic_reset, + sizeof(l2nic_reset), &l2nic_reset, + &out_size, 0); + if (err || !out_size || l2nic_reset.status) { + sdk_err(hwdev->dev_hdl, "Failed to reset L2NIC resources, err: %d, status: 0x%x, out_size: 0x%x\n", + err, l2nic_reset.status, out_size); + return -EIO; + } + + return 0; +} + +static int hifc_l2nic_reset(struct hifc_hwdev *hwdev) +{ + return hifc_l2nic_reset_base(hwdev, 0); +} + +static int __get_func_misc_info(struct hifc_hwdev *hwdev) +{ + int err; + + err = hifc_get_board_info(hwdev, &hwdev->board_info); + if (err) { + sdk_err(hwdev->dev_hdl, "Get board info failed\n"); + return err; + } + + return 0; +} + +static int init_func_mode(struct hifc_hwdev *hwdev) +{ + int err; + + err = __get_func_misc_info(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to get function msic information\n"); + return err; + } + + err = hifc_l2nic_reset(hwdev); + if (err) + return err; + + return 0; +} + +static int __init_eqs_msix_attr(struct hifc_hwdev *hwdev) +{ + int err; + + err = init_aeqs_msix_attr(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init aeqs msix attr\n"); + return err; + } + + err = init_ceqs_msix_attr(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init ceqs msix attr\n"); + return err; + } + + return 0; +} + +static int init_cmdqs_channel(struct hifc_hwdev *hwdev) +{ + u16 func_id; + int err; + + dma_attr_table_init(hwdev); + + err = hifc_comm_ceqs_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init completion event queues\n"); + return err; + } + + err = __init_eqs_msix_attr(hwdev); + if (err) + goto init_eqs_msix_err; + + /* set default wq page_size */ + hwdev->wq_page_size = HIFC_DEFAULT_WQ_PAGE_SIZE; + + err = hifc_global_func_id_get(hwdev, &func_id); + if (err) + goto get_func_id_err; + + err = hifc_set_wq_page_size(hwdev, func_id, hwdev->wq_page_size); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to set wq page size\n"); + goto init_wq_pg_size_err; + } + + err = hifc_comm_cmdqs_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init cmd queues\n"); + goto cmdq_init_err; + } + + set_bit(HIFC_HWDEV_CMDQ_INITED, &hwdev->func_state); + + 
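+	/* at this point the ceqs, eq msix attributes, wq page size and command queues are all initialized */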
return 0; + +cmdq_init_err: + if (HIFC_FUNC_TYPE(hwdev) != TYPE_VF) + hifc_set_wq_page_size(hwdev, func_id, HIFC_HW_WQ_PAGE_SIZE); +init_wq_pg_size_err: +get_func_id_err: +init_eqs_msix_err: + hifc_comm_ceqs_free(hwdev); + + return err; +} + +static int init_mgmt_channel(struct hifc_hwdev *hwdev) +{ + int err; + + err = hifc_comm_clp_to_mgmt_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init clp\n"); + return err; + } + + err = hifc_comm_aeqs_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init async event queues\n"); + goto aeqs_init_err; + } + + err = hifc_comm_pf_to_mgmt_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init msg\n"); + goto msg_init_err; + } + + return err; + +msg_init_err: + hifc_comm_aeqs_free(hwdev); + +aeqs_init_err: + hifc_comm_clp_to_mgmt_free(hwdev); + + return err; +} + +/* initialize communication channel */ +int hifc_init_comm_ch(struct hifc_hwdev *hwdev) +{ + int err; + + err = init_mgmt_channel(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init mgmt channel\n"); + return err; + } + + err = init_func_mode(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init function mode\n"); + goto func_mode_err; + } + + err = init_cmdqs_channel(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to init cmdq channel\n"); + goto init_cmdqs_channel_err; + } + + err = hifc_sync_mgmt_func_state(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to synchronize mgmt function state\n"); + goto sync_mgmt_func_err; + } + + err = hifc_aeq_register_swe_cb(hwdev, HIFC_STATELESS_EVENT, + hifc_nic_sw_aeqe_handler); + if (err) { + sdk_err(hwdev->dev_hdl, + "Failed to register ucode aeqe handler\n"); + goto register_ucode_aeqe_err; + } + + set_bit(HIFC_HWDEV_COMM_CH_INITED, &hwdev->func_state); + + return 0; + +register_ucode_aeqe_err: + hifc_unsync_mgmt_func_state(hwdev); +sync_mgmt_func_err: + return err; + +init_cmdqs_channel_err: + +func_mode_err: + return err; +} + +static void __uninit_comm_module(struct hifc_hwdev *hwdev, + enum hifc_hwdev_init_state init_state) +{ + u16 func_id; + + switch (init_state) { + case HIFC_HWDEV_COMM_CH_INITED: + hifc_aeq_unregister_swe_cb(hwdev, + HIFC_STATELESS_EVENT); + hifc_unsync_mgmt_func_state(hwdev); + break; + case HIFC_HWDEV_CMDQ_INITED: + hifc_comm_cmdqs_free(hwdev); + /* VF can set page size of 256K only, any other value + * will return error in pf, pf will set all vf's page + * size to 4K when disable sriov + */ + if (HIFC_FUNC_TYPE(hwdev) != TYPE_VF) { + func_id = hifc_global_func_id_hw(hwdev); + hifc_set_wq_page_size(hwdev, func_id, + HIFC_HW_WQ_PAGE_SIZE); + } + + hifc_comm_ceqs_free(hwdev); + + break; + case HIFC_HWDEV_MBOX_INITED: + break; + case HIFC_HWDEV_MGMT_INITED: + hifc_comm_pf_to_mgmt_free(hwdev); + break; + case HIFC_HWDEV_AEQ_INITED: + hifc_comm_aeqs_free(hwdev); + break; + case HIFC_HWDEV_CLP_INITED: + hifc_comm_clp_to_mgmt_free(hwdev); + break; + default: + break; + } +} + +#define HIFC_FUNC_STATE_BUSY_TIMEOUT 300 +void hifc_uninit_comm_ch(struct hifc_hwdev *hwdev) +{ + enum hifc_hwdev_init_state init_state = HIFC_HWDEV_COMM_CH_INITED; + int cnt; + + while (init_state > HIFC_HWDEV_NONE_INITED) { + if (!test_bit(init_state, &hwdev->func_state)) { + init_state--; + continue; + } + clear_bit(init_state, &hwdev->func_state); + + cnt = 0; + while (test_bit(HIFC_HWDEV_STATE_BUSY, &hwdev->func_state) && + cnt++ <= HIFC_FUNC_STATE_BUSY_TIMEOUT) + usleep_range(900, 1000); + + __uninit_comm_module(hwdev, init_state); + + init_state--; + } +} + +int 
hifc_slq_init(void *dev, int num_wqs) +{ + struct hifc_hwdev *hwdev = dev; + int err; + + if (!dev) + return -EINVAL; + + hwdev->wqs = kzalloc(sizeof(*hwdev->wqs), GFP_KERNEL); + if (!hwdev->wqs) + return -ENOMEM; + + err = hifc_wqs_alloc(hwdev->wqs, num_wqs, hwdev->dev_hdl); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to alloc wqs\n"); + kfree(hwdev->wqs); + hwdev->wqs = NULL; + } + + return err; +} + +void hifc_slq_uninit(void *dev) +{ + struct hifc_hwdev *hwdev = dev; + + if (!hwdev) + return; + + hifc_wqs_free(hwdev->wqs); + + kfree(hwdev->wqs); +} + +int hifc_slq_alloc(void *dev, u16 wqebb_size, u16 q_depth, u16 page_size, + u64 *cla_addr, void **handle) +{ + struct hifc_hwdev *hwdev = dev; + struct hifc_wq *wq; + int err; + + if (!dev || !cla_addr || !handle) + return -EINVAL; + + wq = kzalloc(sizeof(*wq), GFP_KERNEL); + if (!wq) + return -ENOMEM; + + err = hifc_wq_allocate(hwdev->wqs, wq, wqebb_size, hwdev->wq_page_size, + q_depth, 0); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to alloc wq\n"); + kfree(wq); + return -EFAULT; + } + + *cla_addr = wq->block_paddr; + *handle = wq; + + return 0; +} + +void hifc_slq_free(void *dev, void *handle) +{ + struct hifc_hwdev *hwdev = dev; + + if (!hwdev || !handle) + return; + + hifc_wq_free(hwdev->wqs, handle); + kfree(handle); +} + +u64 hifc_slq_get_addr(void *handle, u16 index) +{ + if (!handle) + return 0; /* NULL of wqe addr */ + + return (u64)hifc_get_wqebb_addr(handle, index); +} + +u64 hifc_slq_get_first_pageaddr(void *handle) +{ + struct hifc_wq *wq = handle; + + if (!handle) + return 0; /* NULL of wqe addr */ + + return hifc_get_first_wqe_page_addr(wq); +} + +int hifc_func_tmr_bitmap_set(void *hwdev, bool en) +{ + struct hifc_func_tmr_bitmap_op bitmap_op = {0}; + u16 out_size = sizeof(bitmap_op); + int err; + + if (!hwdev) + return -EINVAL; + + err = hifc_global_func_id_get(hwdev, &bitmap_op.func_idx); + if (err) + return err; + + bitmap_op.ppf_idx = hifc_ppf_idx(hwdev); + if (en) + bitmap_op.op_id = FUNC_TMR_BITMAP_ENABLE; + else + bitmap_op.op_id = FUNC_TMR_BITMAP_DISABLE; + + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_FUNC_TMR_BITMAT_SET, + &bitmap_op, sizeof(bitmap_op), + &bitmap_op, &out_size, 0); + if (err || !out_size || bitmap_op.status) { + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Failed to set timer bitmap, err: %d, status: 0x%x, out_size: 0x%x\n", + err, bitmap_op.status, out_size); + return -EFAULT; + } + + return 0; +} + +int ppf_ht_gpa_set(struct hifc_hwdev *hwdev, struct hifc_page_addr *pg0, + struct hifc_page_addr *pg1) +{ + struct comm_info_ht_gpa_set ht_gpa_set = {0}; + u16 out_size = sizeof(ht_gpa_set); + int ret; + + pg0->virt_addr = dma_zalloc_coherent(hwdev->dev_hdl, + HIFC_HT_GPA_PAGE_SIZE, + &pg0->phys_addr, GFP_KERNEL); + if (!pg0->virt_addr) { + sdk_err(hwdev->dev_hdl, "Alloc pg0 page addr failed\n"); + return -EFAULT; + } + + pg1->virt_addr = dma_zalloc_coherent(hwdev->dev_hdl, + HIFC_HT_GPA_PAGE_SIZE, + &pg1->phys_addr, GFP_KERNEL); + if (!pg1->virt_addr) { + sdk_err(hwdev->dev_hdl, "Alloc pg1 page addr failed\n"); + return -EFAULT; + } + + ht_gpa_set.page_pa0 = pg0->phys_addr; + ht_gpa_set.page_pa1 = pg1->phys_addr; + sdk_info(hwdev->dev_hdl, "PPF ht gpa set: page_addr0.pa=0x%llx, page_addr1.pa=0x%llx\n", + pg0->phys_addr, pg1->phys_addr); + ret = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_PPF_HT_GPA_SET, + &ht_gpa_set, sizeof(ht_gpa_set), + &ht_gpa_set, &out_size, 0); + if (ret || !out_size || ht_gpa_set.status) { + sdk_warn(hwdev->dev_hdl, "PPF ht 
gpa set failed, ret: %d, status: 0x%x, out_size: 0x%x\n", + ret, ht_gpa_set.status, out_size); + return -EFAULT; + } + + hwdev->page_pa0.phys_addr = pg0->phys_addr; + hwdev->page_pa0.virt_addr = pg0->virt_addr; + + hwdev->page_pa1.phys_addr = pg1->phys_addr; + hwdev->page_pa1.virt_addr = pg1->virt_addr; + + return 0; +} + +int hifc_ppf_ht_gpa_init(struct hifc_hwdev *hwdev) +{ + int ret; + int i; + int j; + int size; + + struct hifc_page_addr page_addr0[HIFC_PPF_HT_GPA_SET_RETRY_TIMES]; + struct hifc_page_addr page_addr1[HIFC_PPF_HT_GPA_SET_RETRY_TIMES]; + + size = HIFC_PPF_HT_GPA_SET_RETRY_TIMES * sizeof(page_addr0[0]); + memset(page_addr0, 0, size); + memset(page_addr1, 0, size); + + for (i = 0; i < HIFC_PPF_HT_GPA_SET_RETRY_TIMES; i++) { + ret = ppf_ht_gpa_set(hwdev, &page_addr0[i], &page_addr1[i]); + if (!ret) + break; + } + + for (j = 0; j < i; j++) { + if (page_addr0[j].virt_addr) { + dma_free_coherent(hwdev->dev_hdl, + HIFC_HT_GPA_PAGE_SIZE, + page_addr0[j].virt_addr, + page_addr0[j].phys_addr); + page_addr0[j].virt_addr = NULL; + } + if (page_addr1[j].virt_addr) { + dma_free_coherent(hwdev->dev_hdl, + HIFC_HT_GPA_PAGE_SIZE, + page_addr1[j].virt_addr, + page_addr1[j].phys_addr); + page_addr1[j].virt_addr = NULL; + } + } + + if (i >= HIFC_PPF_HT_GPA_SET_RETRY_TIMES) { + sdk_err(hwdev->dev_hdl, "PPF ht gpa init failed, retry times: %d\n", + i); + return -EFAULT; + } + + return 0; +} + +void hifc_ppf_ht_gpa_deinit(struct hifc_hwdev *hwdev) +{ + if (hwdev->page_pa0.virt_addr) { + dma_free_coherent(hwdev->dev_hdl, HIFC_HT_GPA_PAGE_SIZE, + hwdev->page_pa0.virt_addr, + hwdev->page_pa0.phys_addr); + hwdev->page_pa0.virt_addr = NULL; + } + + if (hwdev->page_pa1.virt_addr) { + dma_free_coherent(hwdev->dev_hdl, HIFC_HT_GPA_PAGE_SIZE, + hwdev->page_pa1.virt_addr, + hwdev->page_pa1.phys_addr); + hwdev->page_pa1.virt_addr = NULL; + } +} + +static int set_ppf_tmr_status(struct hifc_hwdev *hwdev, + enum ppf_tmr_status status) +{ + struct hifc_ppf_tmr_op op = {0}; + u16 out_size = sizeof(op); + int err = 0; + + if (!hwdev) + return -EINVAL; + + if (hifc_func_type(hwdev) != TYPE_PPF) + return -EFAULT; + + if (status == HIFC_PPF_TMR_FLAG_START) { + err = hifc_ppf_ht_gpa_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "PPF ht gpa init fail!\n"); + return -EFAULT; + } + } else { + hifc_ppf_ht_gpa_deinit(hwdev); + } + + op.op_id = status; + op.ppf_idx = hifc_ppf_idx(hwdev); + + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_PPF_TMR_SET, &op, + sizeof(op), &op, &out_size, 0); + if (err || !out_size || op.status) { + sdk_err(hwdev->dev_hdl, "Failed to set ppf timer, err: %d, status: 0x%x, out_size: 0x%x\n", + err, op.status, out_size); + return -EFAULT; + } + + return 0; +} + +int hifc_ppf_tmr_start(void *hwdev) +{ + if (!hwdev) { + pr_err("Hwdev pointer is NULL for starting ppf timer\n"); + return -EINVAL; + } + + return set_ppf_tmr_status(hwdev, HIFC_PPF_TMR_FLAG_START); +} + +int hifc_ppf_tmr_stop(void *hwdev) +{ + if (!hwdev) { + pr_err("Hwdev pointer is NULL for stop ppf timer\n"); + return -EINVAL; + } + + return set_ppf_tmr_status(hwdev, HIFC_PPF_TMR_FLAG_STOP); +} + +int hifc_set_wq_page_size(struct hifc_hwdev *hwdev, u16 func_idx, + u32 page_size) +{ + struct hifc_wq_page_size page_size_info = {0}; + u16 out_size = sizeof(page_size_info); + int err; + + page_size_info.func_idx = func_idx; + page_size_info.ppf_idx = hifc_ppf_idx(hwdev); + page_size_info.page_size = HIFC_PAGE_SIZE_HW(page_size); + + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_PAGESIZE_SET, 
+ &page_size_info, sizeof(page_size_info), + &page_size_info, &out_size, 0); + if (err || !out_size || page_size_info.status) { + sdk_err(hwdev->dev_hdl, "Failed to set wq page size, err: %d, status: 0x%x, out_size: 0x%0x\n", + err, page_size_info.status, out_size); + return -EFAULT; + } + + return 0; +} + +bool hifc_mgmt_event_ack_first(u8 mod, u8 cmd) +{ + if ((mod == HIFC_MOD_COMM && cmd == HIFC_MGMT_CMD_GET_HOST_INFO) || + (mod == HIFC_MOD_COMM && cmd == HIFC_MGMT_CMD_HEARTBEAT_EVENT)) + return false; + + if (mod == HIFC_MOD_COMM || mod == HIFC_MOD_L2NIC || + mod == HIFC_MOD_HILINK) + return true; + + return false; +} + +#define FAULT_SHOW_STR_LEN 16 + +static void chip_fault_show(struct hifc_hwdev *hwdev, + struct hifc_fault_event *event) +{ + char fault_level[FAULT_LEVEL_MAX][FAULT_SHOW_STR_LEN + 1] = { + "fatal", "reset", "flr", "general", "suggestion"}; + char level_str[FAULT_SHOW_STR_LEN + 1]; + struct hifc_fault_event_stats *fault; + u8 node_id, level; + u32 pos, base; + + fault = &hwdev->hw_stats.fault_event_stats; + + memset(level_str, 0, FAULT_SHOW_STR_LEN + 1); + level = event->event.chip.err_level; + if (level < FAULT_LEVEL_MAX) + strncpy(level_str, fault_level[level], + FAULT_SHOW_STR_LEN); + else + strncpy(level_str, "Unknown", FAULT_SHOW_STR_LEN); + + if (level == FAULT_LEVEL_SERIOUS_FLR) { + sdk_err(hwdev->dev_hdl, "err_level: %d [%s], flr func_id: %d\n", + level, level_str, event->event.chip.func_id); + atomic_inc(&fault->fault_type_stat[event->type]); + } + sdk_err(hwdev->dev_hdl, "module_id: 0x%x, err_type: 0x%x, err_level: %d[%s], err_csr_addr: 0x%08x, err_csr_value: 0x%08x\n", + event->event.chip.node_id, + event->event.chip.err_type, level, level_str, + event->event.chip.err_csr_addr, + event->event.chip.err_csr_value); + + node_id = event->event.chip.node_id; + atomic_inc(&fault->chip_fault_stats[node_id][level]); + + base = event->event.chip.node_id * FAULT_LEVEL_MAX * + HIFC_CHIP_ERROR_TYPE_MAX; + pos = base + HIFC_CHIP_ERROR_TYPE_MAX * level + + event->event.chip.err_type; + if (pos < HIFC_CHIP_FAULT_SIZE) + hwdev->chip_fault_stats[pos]++; +} + +static void fault_report_show(struct hifc_hwdev *hwdev, + struct hifc_fault_event *event) +{ + char fault_type[FAULT_TYPE_MAX][FAULT_SHOW_STR_LEN + 1] = { + "chip", "ucode", "mem rd timeout", "mem wr timeout", + "reg rd timeout", "reg wr timeout", "phy fault"}; + char type_str[FAULT_SHOW_STR_LEN + 1]; + struct hifc_fault_event_stats *fault; + + sdk_err(hwdev->dev_hdl, "Fault event report received, func_id: %d.\n", + hifc_global_func_id(hwdev)); + + memset(type_str, 0, FAULT_SHOW_STR_LEN + 1); + if (event->type < FAULT_TYPE_MAX) + strncpy(type_str, fault_type[event->type], FAULT_SHOW_STR_LEN); + else + strncpy(type_str, "Unknown", FAULT_SHOW_STR_LEN); + + sdk_err(hwdev->dev_hdl, "Fault type: %d [%s]\n", event->type, type_str); + sdk_err(hwdev->dev_hdl, "Fault val[0]: 0x%08x, val[1]: 0x%08x, val[2]: 0x%08x, val[3]: 0x%08x\n", + event->event.val[0], event->event.val[1], event->event.val[2], + event->event.val[3]); + + fault = &hwdev->hw_stats.fault_event_stats; + + switch (event->type) { + case FAULT_TYPE_CHIP: + chip_fault_show(hwdev, event); + break; + case FAULT_TYPE_UCODE: + atomic_inc(&fault->fault_type_stat[event->type]); + + sdk_err(hwdev->dev_hdl, "cause_id: %d, core_id: %d, c_id: %d, epc: 0x%08x\n", + event->event.ucode.cause_id, event->event.ucode.core_id, + event->event.ucode.c_id, event->event.ucode.epc); + break; + case FAULT_TYPE_MEM_RD_TIMEOUT: + case FAULT_TYPE_MEM_WR_TIMEOUT: + 
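+		/* memory read and write timeouts share the same handling: count the fault and dump the timeout CSRs */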
atomic_inc(&fault->fault_type_stat[event->type]); + + sdk_err(hwdev->dev_hdl, "err_csr_ctrl: 0x%08x, err_csr_data: 0x%08x, ctrl_tab: 0x%08x, mem_index: 0x%08x\n", + event->event.mem_timeout.err_csr_ctrl, + event->event.mem_timeout.err_csr_data, + event->event.mem_timeout.ctrl_tab, + event->event.mem_timeout.mem_index); + break; + case FAULT_TYPE_REG_RD_TIMEOUT: + case FAULT_TYPE_REG_WR_TIMEOUT: + atomic_inc(&fault->fault_type_stat[event->type]); + sdk_err(hwdev->dev_hdl, "err_csr: 0x%08x\n", + event->event.reg_timeout.err_csr); + break; + case FAULT_TYPE_PHY_FAULT: + atomic_inc(&fault->fault_type_stat[event->type]); + sdk_err(hwdev->dev_hdl, "op_type: %u, port_id: %u, dev_ad: %u, csr_addr: 0x%08x, op_data: 0x%08x\n", + event->event.phy_fault.op_type, + event->event.phy_fault.port_id, + event->event.phy_fault.dev_ad, + event->event.phy_fault.csr_addr, + event->event.phy_fault.op_data); + break; + default: + break; + } +} + +static void hifc_refresh_history_fault(struct hifc_hwdev *hwdev, + struct hifc_fault_recover_info *info) +{ + if (!hwdev->history_fault_flag) { + hwdev->history_fault_flag = true; + memcpy(&hwdev->history_fault, info, + sizeof(struct hifc_fault_recover_info)); + } else { + if (hwdev->history_fault.fault_lev >= info->fault_lev) + memcpy(&hwdev->history_fault, info, + sizeof(struct hifc_fault_recover_info)); + } +} + +static void fault_event_handler(struct hifc_hwdev *hwdev, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct hifc_cmd_fault_event *fault_event; + struct hifc_event_info event_info; + struct hifc_fault_info_node *fault_node; + + if (in_size != sizeof(*fault_event)) { + sdk_err(hwdev->dev_hdl, "Invalid fault event report, length: %d, should be %ld.\n", + in_size, sizeof(*fault_event)); + return; + } + + fault_event = buf_in; + fault_report_show(hwdev, &fault_event->event); + + if (hwdev->event_callback) { + event_info.type = HIFC_EVENT_FAULT; + memcpy(&event_info.info, &fault_event->event, + sizeof(event_info.info)); + + hwdev->event_callback(hwdev->event_pri_handle, &event_info); + } + + /* refresh history fault info */ + fault_node = kzalloc(sizeof(*fault_node), GFP_KERNEL); + if (!fault_node) { + sdk_err(hwdev->dev_hdl, "Malloc fault node memory failed\n"); + return; + } + + if (fault_event->event.type <= FAULT_TYPE_REG_WR_TIMEOUT) + fault_node->info.fault_src = fault_event->event.type; + else if (fault_event->event.type == FAULT_TYPE_PHY_FAULT) + fault_node->info.fault_src = HIFC_FAULT_SRC_HW_PHY_FAULT; + + if (fault_node->info.fault_src == HIFC_FAULT_SRC_HW_MGMT_CHIP) + fault_node->info.fault_lev = + fault_event->event.event.chip.err_level; + else + fault_node->info.fault_lev = FAULT_LEVEL_FATAL; + + memcpy(&fault_node->info.fault_data.hw_mgmt, &fault_event->event.event, + sizeof(union hifc_fault_hw_mgmt)); + hifc_refresh_history_fault(hwdev, &fault_node->info); + + down(&hwdev->fault_list_sem); + kfree(fault_node); + up(&hwdev->fault_list_sem); +} + +static void heartbeat_lost_event_handler(struct hifc_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hifc_fault_info_node *fault_node; + struct hifc_event_info event_info = {0}; + + atomic_inc(&hwdev->hw_stats.heart_lost_stats); + sdk_err(hwdev->dev_hdl, "Heart lost report received, func_id: %d\n", + hifc_global_func_id(hwdev)); + + if (hwdev->event_callback) { + event_info.type = HIFC_EVENT_HEART_LOST; + hwdev->event_callback(hwdev->event_pri_handle, &event_info); + } + + /* refresh history fault info */ + fault_node = kzalloc(sizeof(*fault_node), 
GFP_KERNEL); + if (!fault_node) { + sdk_err(hwdev->dev_hdl, "Malloc fault node memory failed\n"); + return; + } + + fault_node->info.fault_src = HIFC_FAULT_SRC_HOST_HEARTBEAT_LOST; + fault_node->info.fault_lev = FAULT_LEVEL_FATAL; + hifc_refresh_history_fault(hwdev, &fault_node->info); + + down(&hwdev->fault_list_sem); + kfree(fault_node); + up(&hwdev->fault_list_sem); +} + +static void sw_watchdog_timeout_info_show(struct hifc_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hifc_mgmt_watchdog_info *watchdog_info; + u32 *dump_addr, *reg, stack_len, i, j; + + if (in_size != sizeof(*watchdog_info)) { + sdk_err(hwdev->dev_hdl, "Invalid mgmt watchdog report, length: %d, should be %ld.\n", + in_size, sizeof(*watchdog_info)); + return; + } + + watchdog_info = buf_in; + + sdk_err(hwdev->dev_hdl, "Mgmt deadloop time: 0x%x 0x%x, task id: 0x%x, sp: 0x%x\n", + watchdog_info->curr_time_h, watchdog_info->curr_time_l, + watchdog_info->task_id, watchdog_info->sp); + sdk_err(hwdev->dev_hdl, "Stack current used: 0x%x, peak used: 0x%x, overflow flag: 0x%x, top: 0x%x, bottom: 0x%x\n", + watchdog_info->curr_used, watchdog_info->peak_used, + watchdog_info->is_overflow, watchdog_info->stack_top, + watchdog_info->stack_bottom); + + sdk_err(hwdev->dev_hdl, "Mgmt pc: 0x%08x, lr: 0x%08x, cpsr:0x%08x\n", + watchdog_info->pc, watchdog_info->lr, watchdog_info->cpsr); + + sdk_err(hwdev->dev_hdl, "Mgmt register info\n"); + + for (i = 0; i < 3; i++) { + reg = watchdog_info->reg + (u64)(u32)(4 * i); + sdk_err(hwdev->dev_hdl, "0x%08x 0x%08x 0x%08x 0x%08x\n", + *(reg), *(reg + 1), *(reg + 2), *(reg + 3)); + } + + sdk_err(hwdev->dev_hdl, "0x%08x\n", watchdog_info->reg[12]); + + if (watchdog_info->stack_actlen <= 1024) { + stack_len = watchdog_info->stack_actlen; + } else { + sdk_err(hwdev->dev_hdl, "Oops stack length: 0x%x is wrong\n", + watchdog_info->stack_actlen); + stack_len = 1024; + } + + sdk_err(hwdev->dev_hdl, "Mgmt dump stack, 16Bytes per line(start from sp)\n"); + for (i = 0; i < (stack_len / 16); i++) { + dump_addr = (u32 *)(watchdog_info->data + ((u64)(u32)(i * 16))); + sdk_err(hwdev->dev_hdl, "0x%08x 0x%08x 0x%08x 0x%08x\n", + *dump_addr, *(dump_addr + 1), *(dump_addr + 2), + *(dump_addr + 3)); + } + + for (j = 0; j < ((stack_len % 16) / 4); j++) { + dump_addr = (u32 *)(watchdog_info->data + + ((u64)(u32)(i * 16 + j * 4))); + sdk_err(hwdev->dev_hdl, "0x%08x ", *dump_addr); + } + + *out_size = sizeof(*watchdog_info); + watchdog_info = buf_out; + watchdog_info->status = 0; +} + +static void mgmt_watchdog_timeout_event_handler(struct hifc_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hifc_fault_info_node *fault_node; + + sw_watchdog_timeout_info_show(hwdev, buf_in, in_size, + buf_out, out_size); + + /* refresh history fault info */ + fault_node = kzalloc(sizeof(*fault_node), GFP_KERNEL); + if (!fault_node) { + sdk_err(hwdev->dev_hdl, "Malloc fault node memory failed\n"); + return; + } + + fault_node->info.fault_src = HIFC_FAULT_SRC_MGMT_WATCHDOG; + fault_node->info.fault_lev = FAULT_LEVEL_FATAL; + hifc_refresh_history_fault(hwdev, &fault_node->info); + + down(&hwdev->fault_list_sem); + kfree(fault_node); + up(&hwdev->fault_list_sem); +} + +static void mgmt_reset_event_handler(struct hifc_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + sdk_info(hwdev->dev_hdl, "Mgmt is reset\n"); + + /* mgmt reset only occurred when hot update or Mgmt deadloop, + * if Mgmt deadloop, mgmt will report an event with + * 
mod=0, cmd=0x56, and will report the fault to the os, + * so the mgmt reset event doesn't need to report a fault + */ +} + +static void hifc_fmw_act_ntc_handler(struct hifc_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hifc_event_info event_info = {0}; + struct hifc_fmw_act_ntc *notice_info; + + if (in_size != sizeof(*notice_info)) { + sdk_err(hwdev->dev_hdl, "Invalid mgmt firmware active notice, length: %d, should be %ld.\n", + in_size, sizeof(*notice_info)); + return; + } + + /* mgmt is active now, restart heartbeat enhanced detection */ + __set_heartbeat_ehd_detect_delay(hwdev, 0); + + if (!hwdev->event_callback) + return; + + event_info.type = HIFC_EVENT_FMW_ACT_NTC; + hwdev->event_callback(hwdev->event_pri_handle, &event_info); + + *out_size = sizeof(*notice_info); + notice_info = buf_out; + notice_info->status = 0; +} + +static void hifc_pcie_dfx_event_handler(struct hifc_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hifc_pcie_dfx_ntc *notice_info = buf_in; + struct hifc_pcie_dfx_info *dfx_info; + u16 size = 0; + u16 cnt = 0; + u32 num = 0; + u32 i, j; + int err; + u32 *reg; + + if (in_size != sizeof(*notice_info)) { + sdk_err(hwdev->dev_hdl, "Invalid mgmt pcie dfx notice, length: %d, should be %ld.\n", + in_size, sizeof(*notice_info)); + return; + } + + dfx_info = kzalloc(sizeof(*dfx_info), GFP_KERNEL); + if (!dfx_info) { + sdk_err(hwdev->dev_hdl, "Malloc dfx_info memory failed\n"); + return; + } + + ((struct hifc_pcie_dfx_ntc *)buf_out)->status = 0; + *out_size = sizeof(*notice_info); + num = (u32)(notice_info->len / 1024); + sdk_info(hwdev->dev_hdl, "INFO LEN: %d\n", notice_info->len); + sdk_info(hwdev->dev_hdl, "PCIE DFX:\n"); + dfx_info->host_id = 0; + for (i = 0; i < num; i++) { + dfx_info->offset = i * MAX_PCIE_DFX_BUF_SIZE; + if (i == (num - 1)) + dfx_info->last = 1; + size = sizeof(*dfx_info); + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_PCIE_DFX_GET, + dfx_info, sizeof(*dfx_info), + dfx_info, &size, 0); + if (err || dfx_info->status || !size) { + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Failed to get pcie dfx info, err: %d, status: 0x%x, out size: 0x%x\n", + err, dfx_info->status, size); + kfree(dfx_info); + return; + } + + reg = (u32 *)dfx_info->data; + for (j = 0; j < 256; j = j + 8) { + /*lint -save -e661 -e662*/ + sdk_info(hwdev->dev_hdl, "0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", + cnt, reg[j], reg[(u32)(j + 1)], + reg[(u32)(j + 2)], reg[(u32)(j + 3)], + reg[(u32)(j + 4)], reg[(u32)(j + 5)], + reg[(u32)(j + 6)], reg[(u32)(j + 7)]); + /*lint -restore*/ + cnt = cnt + 32; + } + memset(dfx_info->data, 0, MAX_PCIE_DFX_BUF_SIZE); + } + kfree(dfx_info); +} + +struct hifc_mctp_get_host_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 huawei_cmd; + u8 sub_cmd; + u8 rsvd[2]; + + u32 actual_len; + + u8 data[1024]; +}; + +static void hifc_mctp_get_host_info_event_handler(struct hifc_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hifc_event_info event_info = {0}; + struct hifc_mctp_get_host_info *mctp_out, *mctp_in; + struct hifc_mctp_host_info *host_info; + + if (in_size != sizeof(*mctp_in)) { + sdk_err(hwdev->dev_hdl, "Invalid mgmt mctp info, length: %d, should be %ld\n", + in_size, sizeof(*mctp_in)); + return; + } + + *out_size = sizeof(*mctp_out); + mctp_out = buf_out; + mctp_out->status = 0; + + if (!hwdev->event_callback) { + mctp_out->status = HIFC_MGMT_STATUS_ERR_INIT; + return; + } + + mctp_in = 
buf_in; + host_info = &event_info.mctp_info; + host_info->major_cmd = mctp_in->huawei_cmd; + host_info->sub_cmd = mctp_in->sub_cmd; + host_info->data = mctp_out->data; + + event_info.type = HIFC_EVENT_MCTP_GET_HOST_INFO; + hwdev->event_callback(hwdev->event_pri_handle, &event_info); + + mctp_out->actual_len = host_info->data_len; +} + +char *__hw_to_char_fec[HILINK_FEC_MAX_TYPE] = {"RS-FEC", "BASE-FEC", "NO-FEC"}; + +char *__hw_to_char_port_type[LINK_PORT_MAX_TYPE] = { + "Unknown", "Fibre", "Electric", "Direct Attach Copper", "AOC", + "Back plane", "BaseT" +}; + +static void __get_port_type(struct hifc_hwdev *hwdev, + struct hifc_link_info *info, char **port_type) +{ + if (info->cable_absent) { + sdk_info(hwdev->dev_hdl, "Cable unpresent\n"); + return; + } + + if (info->port_type < LINK_PORT_MAX_TYPE) + *port_type = __hw_to_char_port_type[info->port_type]; + else + sdk_info(hwdev->dev_hdl, "Unknown port type: %u\n", + info->port_type); + if (info->port_type == LINK_PORT_FIBRE) { + if (info->port_sub_type == FIBRE_SUBTYPE_SR) + *port_type = "Fibre-SR"; + else if (info->port_sub_type == FIBRE_SUBTYPE_LR) + *port_type = "Fibre-LR"; + } +} + +static void __print_cable_info(struct hifc_hwdev *hwdev, + struct hifc_link_info *info) +{ + char tmp_str[512] = {0}; + char tmp_vendor[17] = {0}; + char *port_type = "Unknown port type"; + int i; + + __get_port_type(hwdev, info, &port_type); + + for (i = sizeof(info->vendor_name) - 1; i >= 0; i--) { + if (info->vendor_name[i] == ' ') + info->vendor_name[i] = '\0'; + else + break; + } + + memcpy(tmp_vendor, info->vendor_name, + sizeof(info->vendor_name)); + snprintf(tmp_str, sizeof(tmp_str) - 1, + "Vendor: %s, %s, length: %um, max_speed: %uGbps", + tmp_vendor, port_type, info->cable_length, + info->cable_max_speed); + if (info->port_type == LINK_PORT_FIBRE || + info->port_type == LINK_PORT_AOC) { + snprintf(tmp_str, sizeof(tmp_str) - 1, + "%s, %s, Temperature: %u", tmp_str, + info->sfp_type ? "SFP" : "QSFP", info->cable_temp); + if (info->sfp_type) { + snprintf(tmp_str, sizeof(tmp_str) - 1, + "%s, rx power: %uuW, tx power: %uuW", + tmp_str, info->power[0], info->power[1]); + } else { + snprintf(tmp_str, sizeof(tmp_str) - 1, + "%s, rx power: %uuw %uuW %uuW %uuW", + tmp_str, info->power[0], info->power[1], + info->power[2], info->power[3]); + } + } + + sdk_info(hwdev->dev_hdl, "Cable information: %s\n", + tmp_str); +} + +static void __hi30_lane_info(struct hifc_hwdev *hwdev, + struct hilink_lane *lane) +{ + struct hi30_ffe_data *ffe_data; + struct hi30_ctle_data *ctle_data; + + ffe_data = (struct hi30_ffe_data *)lane->hi30_ffe; + ctle_data = (struct hi30_ctle_data *)lane->hi30_ctle; + + sdk_info(hwdev->dev_hdl, "TX_FFE: PRE1=%s%d; PRE2=%s%d; MAIN=%d; POST1=%s%d; POST1X=%s%d\n", + (ffe_data->PRE1 & 0x10) ? "-" : "", + (int)(ffe_data->PRE1 & 0xf), + (ffe_data->PRE2 & 0x10) ? "-" : "", + (int)(ffe_data->PRE2 & 0xf), + (int)ffe_data->MAIN, + (ffe_data->POST1 & 0x10) ? "-" : "", + (int)(ffe_data->POST1 & 0xf), + (ffe_data->POST2 & 0x10) ? 
"-" : "", + (int)(ffe_data->POST2 & 0xf)); + sdk_info(hwdev->dev_hdl, "RX_CTLE: Gain1~3=%u %u %u; Boost1~3=%u %u %u; Zero1~3=%u %u %u; Squelch1~3=%u %u %u\n", + ctle_data->ctlebst[0], ctle_data->ctlebst[1], + ctle_data->ctlebst[2], ctle_data->ctlecmband[0], + ctle_data->ctlecmband[1], ctle_data->ctlecmband[2], + ctle_data->ctlermband[0], ctle_data->ctlermband[1], + ctle_data->ctlermband[2], ctle_data->ctleza[0], + ctle_data->ctleza[1], ctle_data->ctleza[2]); +} + +static void __print_hi30_status(struct hifc_hwdev *hwdev, + struct hifc_link_info *info) +{ + struct hilink_lane *lane; + int lane_used_num = 0, i; + + for (i = 0; i < HILINK_MAX_LANE; i++) { + lane = (struct hilink_lane *)(info->lane2 + i * sizeof(*lane)); + if (!lane->lane_used) + continue; + + __hi30_lane_info(hwdev, lane); + lane_used_num++; + } + + /* in new firmware, all lane info setted in lane2 */ + if (lane_used_num) + return; + + /* compatible old firmware */ + __hi30_lane_info(hwdev, (struct hilink_lane *)info->lane1); +} + +static void __print_link_info(struct hifc_hwdev *hwdev, + struct hifc_link_info *info, + enum hilink_info_print_event type) +{ + char *fec = "None"; + + if (info->fec < HILINK_FEC_MAX_TYPE) + fec = __hw_to_char_fec[info->fec]; + else + sdk_info(hwdev->dev_hdl, "Unknown fec type: %u\n", + info->fec); + + if (type == HILINK_EVENT_LINK_UP || !info->an_state) { + sdk_info(hwdev->dev_hdl, "Link information: speed %dGbps, %s, autoneg %s\n", + info->speed, fec, info->an_state ? "on" : "off"); + } else { + sdk_info(hwdev->dev_hdl, "Link information: antoneg: %s\n", + info->an_state ? "on" : "off"); + } +} + +static char *hilink_info_report_type[HILINK_EVENT_MAX_TYPE] = { + "", "link up", "link down", "cable plugged" +}; + +void print_hilink_info(struct hifc_hwdev *hwdev, + enum hilink_info_print_event type, + struct hifc_link_info *info) +{ + __print_cable_info(hwdev, info); + + __print_link_info(hwdev, info, type); + + __print_hi30_status(hwdev, info); + + if (type == HILINK_EVENT_LINK_UP) + return; + + if (type == HILINK_EVENT_CABLE_PLUGGED) { + sdk_info(hwdev->dev_hdl, "alos: %u, rx_los: %u\n", + info->alos, info->rx_los); + return; + } + + sdk_info(hwdev->dev_hdl, "PMA ctrl: %s, MAC tx %s, MAC rx %s, PMA debug info reg: 0x%x, PMA signal ok reg: 0x%x, RF/LF status reg: 0x%x\n", + info->pma_status == 1 ? "off" : "on", + info->mac_tx_en ? "enable" : "disable", + info->mac_rx_en ? 
"enable" : "disable", info->pma_dbg_info_reg, + info->pma_signal_ok_reg, info->rf_lf_status_reg); + sdk_info(hwdev->dev_hdl, "alos: %u, rx_los: %u, PCS block counter reg: 0x%x, PCS link: 0x%x, MAC link: 0x%x PCS_err_cnt: 0x%x\n", + info->alos, info->rx_los, info->pcs_err_blk_cnt_reg, + info->pcs_link_reg, info->mac_link_reg, info->pcs_err_cnt); +} + +static void hifc_print_hilink_info(struct hifc_hwdev *hwdev, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct hifc_hilink_link_info *hilink_info = buf_in; + struct hifc_link_info *info; + enum hilink_info_print_event type; + + if (in_size != sizeof(*hilink_info)) { + sdk_err(hwdev->dev_hdl, "Invalid hilink info message size %d, should be %ld\n", + in_size, sizeof(*hilink_info)); + return; + } + + ((struct hifc_hilink_link_info *)buf_out)->status = 0; + *out_size = sizeof(*hilink_info); + + info = &hilink_info->info; + type = hilink_info->info_type; + + if (type < HILINK_EVENT_LINK_UP || type >= HILINK_EVENT_MAX_TYPE) { + sdk_info(hwdev->dev_hdl, "Invalid hilink info report, type: %d\n", + type); + return; + } + + sdk_info(hwdev->dev_hdl, "Hilink info report after %s\n", + hilink_info_report_type[type]); + + print_hilink_info(hwdev, type, info); +} + +static void __port_sfp_info_event(struct hifc_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hifc_cmd_get_sfp_qsfp_info *sfp_info = buf_in; + struct hifc_port_routine_cmd *rt_cmd; + struct card_node *chip_node = hwdev->chip_node; + + if (in_size != sizeof(*sfp_info)) { + sdk_err(hwdev->dev_hdl, "Invalid sfp info cmd, length: %d, should be %ld\n", + in_size, sizeof(*sfp_info)); + return; + } + + if (sfp_info->port_id >= HIFC_MAX_PORT_ID) { + sdk_err(hwdev->dev_hdl, "Invalid sfp port id: %d, max port is %d\n", + sfp_info->port_id, HIFC_MAX_PORT_ID - 1); + return; + } + + if (!chip_node->rt_cmd) + return; + + rt_cmd = &chip_node->rt_cmd[sfp_info->port_id]; + mutex_lock(&chip_node->sfp_mutex); + memcpy(&rt_cmd->sfp_info, sfp_info, sizeof(rt_cmd->sfp_info)); + rt_cmd->up_send_sfp_info = true; + mutex_unlock(&chip_node->sfp_mutex); +} + +static void __port_sfp_abs_event(struct hifc_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hifc_cmd_get_light_module_abs *sfp_abs = buf_in; + struct hifc_port_routine_cmd *rt_cmd; + struct card_node *chip_node = hwdev->chip_node; + + if (in_size != sizeof(*sfp_abs)) { + sdk_err(hwdev->dev_hdl, "Invalid sfp absent cmd, length: %d, should be %ld\n", + in_size, sizeof(*sfp_abs)); + return; + } + + if (sfp_abs->port_id >= HIFC_MAX_PORT_ID) { + sdk_err(hwdev->dev_hdl, "Invalid sfp port id: %d, max port is %d\n", + sfp_abs->port_id, HIFC_MAX_PORT_ID - 1); + return; + } + + if (!chip_node->rt_cmd) + return; + + rt_cmd = &chip_node->rt_cmd[sfp_abs->port_id]; + mutex_lock(&chip_node->sfp_mutex); + memcpy(&rt_cmd->abs, sfp_abs, sizeof(rt_cmd->abs)); + rt_cmd->up_send_sfp_abs = true; + mutex_unlock(&chip_node->sfp_mutex); +} + +static void mgmt_heartbeat_enhanced_event(struct hifc_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hifc_heartbeat_event *hb_event = buf_in; + struct hifc_heartbeat_event *hb_event_out = buf_out; + struct hifc_hwdev *dev = hwdev; + + if (in_size != sizeof(*hb_event)) { + sdk_err(dev->dev_hdl, "Invalid data size from mgmt for heartbeat event: %d\n", + in_size); + return; + } + + if (dev->heartbeat_ehd.last_heartbeat != hb_event->heart) { + dev->heartbeat_ehd.last_update_jiffies = jiffies; + 
dev->heartbeat_ehd.last_heartbeat = hb_event->heart; + } + + hb_event_out->drv_heart = HEARTBEAT_DRV_MAGIC_ACK; + + hb_event_out->status = 0; + *out_size = sizeof(*hb_event_out); +} + +struct dev_event_handler { + u8 mod; + u8 cmd; + void (*handler)(struct hifc_hwdev *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); +}; + +struct dev_event_handler dev_cmd_handler[] = { + { + .mod = HIFC_MOD_L2NIC, + .cmd = HIFC_PORT_CMD_GET_SFP_INFO, + .handler = __port_sfp_info_event, + }, + + { + .mod = HIFC_MOD_L2NIC, + .cmd = HIFC_PORT_CMD_GET_SFP_ABS, + .handler = __port_sfp_abs_event, + }, + + { + .mod = HIFC_MOD_HILINK, + .cmd = HIFC_HILINK_CMD_GET_LINK_INFO, + .handler = hifc_print_hilink_info, + }, + + { + .mod = HIFC_MOD_COMM, + .cmd = HIFC_MGMT_CMD_FAULT_REPORT, + .handler = fault_event_handler, + }, + + { + .mod = HIFC_MOD_L2NIC, + .cmd = HIFC_MGMT_CMD_HEART_LOST_REPORT, + .handler = heartbeat_lost_event_handler, + }, + + { + .mod = HIFC_MOD_COMM, + .cmd = HIFC_MGMT_CMD_WATCHDOG_INFO, + .handler = mgmt_watchdog_timeout_event_handler, + }, + + { + .mod = HIFC_MOD_L2NIC, + .cmd = HIFC_PORT_CMD_MGMT_RESET, + .handler = mgmt_reset_event_handler, + }, + + { + .mod = HIFC_MOD_COMM, + .cmd = HIFC_MGMT_CMD_FMW_ACT_NTC, + .handler = hifc_fmw_act_ntc_handler, + }, + + { + .mod = HIFC_MOD_COMM, + .cmd = HIFC_MGMT_CMD_PCIE_DFX_NTC, + .handler = hifc_pcie_dfx_event_handler, + }, + + { + .mod = HIFC_MOD_COMM, + .cmd = HIFC_MGMT_CMD_GET_HOST_INFO, + .handler = hifc_mctp_get_host_info_event_handler, + }, + + { + .mod = HIFC_MOD_COMM, + .cmd = HIFC_MGMT_CMD_HEARTBEAT_EVENT, + .handler = mgmt_heartbeat_enhanced_event, + }, +}; + +/* public process for this event: + * pf link change event + * pf heart lost event ,TBD + * pf fault report event + * vf link change event + * vf heart lost event, TBD + * vf fault report event, TBD + */ +static void _event_handler(struct hifc_hwdev *hwdev, enum hifc_mod_type mod, + u8 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + u32 i, size = sizeof(dev_cmd_handler) / sizeof(dev_cmd_handler[0]); + + if (!hwdev) + return; + + *out_size = 0; + + for (i = 0; i < size; i++) { + if (cmd == dev_cmd_handler[i].cmd && + mod == dev_cmd_handler[i].mod) { + dev_cmd_handler[i].handler(hwdev, buf_in, in_size, + buf_out, out_size); + break; + } + } + + /* can't find this event cmd */ + if (i == size) + sdk_warn(hwdev->dev_hdl, "Unsupported mod(%d) event cmd(%d) to process\n", + mod, cmd); +} + +/* pf link change event */ +static void pf_nic_event_handler(void *hwdev, void *pri_handle, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + _event_handler(hwdev, HIFC_MOD_L2NIC, cmd, buf_in, in_size, + buf_out, out_size); +} + +static void pf_hilink_event_handler(void *hwdev, void *pri_handle, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + _event_handler(hwdev, HIFC_MOD_HILINK, cmd, buf_in, in_size, + buf_out, out_size); +} + +/* pf fault report event */ +void pf_fault_event_handler(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + _event_handler(hwdev, HIFC_MOD_COMM, HIFC_MGMT_CMD_FAULT_REPORT, + buf_in, in_size, buf_out, out_size); +} + +void mgmt_watchdog_event_handler(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + _event_handler(hwdev, HIFC_MOD_COMM, HIFC_MGMT_CMD_WATCHDOG_INFO, + buf_in, in_size, buf_out, out_size); +} + +void mgmt_fmw_act_event_handler(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + _event_handler(hwdev, 
HIFC_MOD_COMM, HIFC_MGMT_CMD_FMW_ACT_NTC, + buf_in, in_size, buf_out, out_size); +} + +void mgmt_pcie_dfx_event_handler(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + _event_handler(hwdev, HIFC_MOD_COMM, HIFC_MGMT_CMD_PCIE_DFX_NTC, + buf_in, in_size, buf_out, out_size); +} + +void mgmt_get_mctp_event_handler(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + _event_handler(hwdev, HIFC_MOD_COMM, HIFC_MGMT_CMD_GET_HOST_INFO, + buf_in, in_size, buf_out, out_size); +} + +void mgmt_heartbeat_event_handler(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + _event_handler(hwdev, HIFC_MOD_COMM, HIFC_MGMT_CMD_HEARTBEAT_EVENT, + buf_in, in_size, buf_out, out_size); +} + +static void pf_event_register(struct hifc_hwdev *hwdev) +{ + if (hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_MGMT_INITED)) { + hifc_register_mgmt_msg_cb(hwdev, HIFC_MOD_L2NIC, + hwdev, pf_nic_event_handler); + hifc_register_mgmt_msg_cb(hwdev, HIFC_MOD_HILINK, + hwdev, + pf_hilink_event_handler); + hifc_comm_recv_mgmt_self_cmd_reg(hwdev, + HIFC_MGMT_CMD_FAULT_REPORT, + pf_fault_event_handler); + + hifc_comm_recv_mgmt_self_cmd_reg(hwdev, + HIFC_MGMT_CMD_WATCHDOG_INFO, + mgmt_watchdog_event_handler); + + hifc_comm_recv_mgmt_self_cmd_reg(hwdev, + HIFC_MGMT_CMD_FMW_ACT_NTC, + mgmt_fmw_act_event_handler); + hifc_comm_recv_mgmt_self_cmd_reg(hwdev, + HIFC_MGMT_CMD_PCIE_DFX_NTC, + mgmt_pcie_dfx_event_handler); + hifc_comm_recv_mgmt_self_cmd_reg(hwdev, + HIFC_MGMT_CMD_GET_HOST_INFO, + mgmt_get_mctp_event_handler); + } +} + +void hifc_event_register(void *dev, void *pri_handle, + hifc_event_handler callback) +{ + struct hifc_hwdev *hwdev = dev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for register event\n"); + return; + } + + hwdev->event_callback = callback; + hwdev->event_pri_handle = pri_handle; + + pf_event_register(hwdev); +} + +void hifc_event_unregister(void *dev) +{ + struct hifc_hwdev *hwdev = dev; + + hwdev->event_callback = NULL; + hwdev->event_pri_handle = NULL; + + hifc_unregister_mgmt_msg_cb(hwdev, HIFC_MOD_L2NIC); + hifc_unregister_mgmt_msg_cb(hwdev, HIFC_MOD_HILINK); + hifc_comm_recv_up_self_cmd_unreg(hwdev, + HIFC_MGMT_CMD_FAULT_REPORT); + hifc_comm_recv_up_self_cmd_unreg(hwdev, + HIFC_MGMT_CMD_WATCHDOG_INFO); + hifc_comm_recv_up_self_cmd_unreg(hwdev, + HIFC_MGMT_CMD_FMW_ACT_NTC); + hifc_comm_recv_up_self_cmd_unreg(hwdev, + HIFC_MGMT_CMD_PCIE_DFX_NTC); + hifc_comm_recv_up_self_cmd_unreg(hwdev, + HIFC_MGMT_CMD_GET_HOST_INFO); +} + +/* 0 - heartbeat lost, 1 - normal */ +static u8 hifc_get_heartbeat_status(struct hifc_hwdev *hwdev) +{ + struct hifc_hwif *hwif = hwdev->hwif; + u32 attr1; + + /* suprise remove should be set 1 */ + if (!hifc_get_chip_present_flag(hwdev)) + return 1; + + attr1 = hifc_hwif_read_reg(hwif, HIFC_CSR_FUNC_ATTR1_ADDR); + if (attr1 == HIFC_PCIE_LINK_DOWN) { + sdk_err(hwdev->dev_hdl, "Detect pcie is link down\n"); + hifc_set_chip_absent(hwdev); + hifc_force_complete_all(hwdev); + /* should notify chiperr to pangea + * when detecting pcie link down + */ + return 1; + } + + return HIFC_AF1_GET(attr1, MGMT_INIT_STATUS); +} + +static void hifc_heartbeat_event_handler(struct work_struct *work) +{ + struct hifc_hwdev *hwdev = + container_of(work, struct hifc_hwdev, timer_work); + u16 out = 0; + + _event_handler(hwdev, HIFC_MOD_L2NIC, HIFC_MGMT_CMD_HEART_LOST_REPORT, + NULL, 0, &out, &out); +} + +static bool __detect_heartbeat_ehd_lost(struct hifc_hwdev *hwdev) +{ + struct hifc_heartbeat_enhanced *hb_ehd = &hwdev->heartbeat_ehd; 
+ u64 update_time; + bool hb_ehd_lost = false; + + if (!hb_ehd->en) + return false; + + if (time_after(jiffies, hb_ehd->start_detect_jiffies)) { + update_time = jiffies_to_msecs(jiffies - + hb_ehd->last_update_jiffies); + if (update_time > HIFC_HEARBEAT_ENHANCED_LOST) { + sdk_warn(hwdev->dev_hdl, "Heartbeat enhanced lost for %d millisecond\n", + (u32)update_time); + hb_ehd_lost = true; + } + } else { + /* mgmt may not report heartbeart enhanced event and won't + * update last_update_jiffies + */ + hb_ehd->last_update_jiffies = jiffies; + } + + return hb_ehd_lost; +} + +static void hifc_heartbeat_timer_handler(struct timer_list *t) +{ + struct hifc_hwdev *hwdev = from_timer(hwdev, t, heartbeat_timer); + + if (__detect_heartbeat_ehd_lost(hwdev) || + !hifc_get_heartbeat_status(hwdev)) { + hwdev->heartbeat_lost = 1; + queue_work(hwdev->workq, &hwdev->timer_work); + } else { + mod_timer(&hwdev->heartbeat_timer, + jiffies + msecs_to_jiffies(HIFC_HEARTBEAT_PERIOD)); + } +} + +void add_to_timer(struct timer_list *timer, long period) +{ + if (!timer) + return; + + add_timer(timer); +} + +void delete_timer(struct timer_list *timer) +{ + if (!timer) + return; + + del_timer_sync(timer); +} + +void hifc_init_heartbeat(struct hifc_hwdev *hwdev) +{ + timer_setup(&hwdev->heartbeat_timer, hifc_heartbeat_timer_handler, 0); + hwdev->heartbeat_timer.expires = + jiffies + msecs_to_jiffies(HIFC_HEARTBEAT_START_EXPIRE); + + add_to_timer(&hwdev->heartbeat_timer, HIFC_HEARTBEAT_PERIOD); + + INIT_WORK(&hwdev->timer_work, hifc_heartbeat_event_handler); +} + +void hifc_destroy_heartbeat(struct hifc_hwdev *hwdev) +{ + delete_timer(&hwdev->heartbeat_timer); +} + +u8 hifc_nic_sw_aeqe_handler(void *handle, u8 event, u64 data) +{ + struct hifc_hwdev *hwdev = (struct hifc_hwdev *)handle; + u8 event_level = FAULT_LEVEL_MAX; + + switch (event) { + case HIFC_INTERNAL_TSO_FATAL_ERROR: + case HIFC_INTERNAL_LRO_FATAL_ERROR: + case HIFC_INTERNAL_TX_FATAL_ERROR: + case HIFC_INTERNAL_RX_FATAL_ERROR: + case HIFC_INTERNAL_OTHER_FATAL_ERROR: + atomic_inc(&hwdev->hw_stats.nic_ucode_event_stats[event]); + sdk_err(hwdev->dev_hdl, "SW aeqe event type: 0x%x, data: 0x%llx\n", + event, data); + event_level = FAULT_LEVEL_FATAL; + break; + default: + sdk_err(hwdev->dev_hdl, "Unsupported sw event %d to process.\n", + event); + } + + return event_level; +} + +void hifc_set_pcie_order_cfg(void *handle) +{ + struct hifc_hwdev *hwdev = handle; + u32 val; + + if (!hwdev) + return; + + val = hifc_hwif_read_reg(hwdev->hwif, + HIFC_GLB_DMA_SO_RO_REPLACE_ADDR); + + if (HIFC_GLB_DMA_SO_RO_GET(val, SO_RO_CFG)) { + val = HIFC_GLB_DMA_SO_R0_CLEAR(val, SO_RO_CFG); + val |= HIFC_GLB_DMA_SO_R0_SET(HIFC_DISABLE_ORDER, SO_RO_CFG); + hifc_hwif_write_reg(hwdev->hwif, + HIFC_GLB_DMA_SO_RO_REPLACE_ADDR, val); + } +} + +int hifc_get_board_info(void *hwdev, struct hifc_board_info *info) +{ + struct hifc_comm_board_info board_info = {0}; + u16 out_size = sizeof(board_info); + int err; + + if (!hwdev || !info) + return -EINVAL; + + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_GET_BOARD_INFO, + &board_info, sizeof(board_info), + &board_info, &out_size, 0); + if (err || board_info.status || !out_size) { + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Failed to get board info, err: %d, status: 0x%x, out size: 0x%x\n", + err, board_info.status, out_size); + return -EFAULT; + } + + memcpy(info, &board_info.info, sizeof(*info)); + + return 0; +} + +int hifc_get_phy_init_status(void *hwdev, + enum phy_init_status_type *init_status) +{ + struct 
hifc_phy_init_status phy_info = {0}; + u16 out_size = sizeof(phy_info); + int err; + + if (!hwdev || !init_status) + return -EINVAL; + + err = hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_GET_PHY_INIT_STATUS, + &phy_info, sizeof(phy_info), + &phy_info, &out_size, 0); + if ((phy_info.status != HIFC_MGMT_CMD_UNSUPPORTED && + phy_info.status) || err || !out_size) { + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Failed to get phy info, err: %d, status: 0x%x, out size: 0x%x\n", + err, phy_info.status, out_size); + return -EFAULT; + } + + *init_status = phy_info.init_status; + + return phy_info.status; +} + +int hifc_phy_init_status_judge(void *hwdev) +{ + enum phy_init_status_type init_status; + int ret; + unsigned long end; + + /* It's not a phy, so don't judge phy status */ + if (!HIFC_BOARD_IS_PHY((struct hifc_hwdev *)hwdev)) + return 0; + + end = jiffies + msecs_to_jiffies(PHY_DOING_INIT_TIMEOUT); + do { + ret = hifc_get_phy_init_status(hwdev, &init_status); + if (ret == HIFC_MGMT_CMD_UNSUPPORTED) + return 0; + else if (ret) + return -EFAULT; + + switch (init_status) { + case PHY_INIT_SUCCESS: + sdk_info(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Phy init is success\n"); + return 0; + case PHY_NONSUPPORT: + sdk_info(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Phy init is nonsupport\n"); + return 0; + case PHY_INIT_FAIL: + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Phy init is failed\n"); + return -EIO; + case PHY_INIT_DOING: + msleep(250); + break; + default: + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Phy init is invalid, init_status: %d\n", + init_status); + return -EINVAL; + } + } while (time_before(jiffies, end)); + + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Phy init is timeout\n"); + + return -ETIMEDOUT; +} + +int hifc_get_mgmt_channel_status(void *handle) +{ + struct hifc_hwdev *hwdev = handle; + u32 val; + + if (!hwdev) + return true; + + if (hifc_func_type(hwdev) == TYPE_VF || + !(hwdev->feature_cap & HIFC_FUNC_SUPP_DFX_REG)) + return false; + + val = hifc_hwif_read_reg(hwdev->hwif, HIFC_ICPL_RESERVD_ADDR); + + return HIFC_GET_MGMT_CHANNEL_STATUS(val, MGMT_CHANNEL_STATUS); +} + +#define HIFC_RED_REG_TIME_OUT 3000 + +int hifc_read_reg(void *hwdev, u32 reg_addr, u32 *val) +{ + struct hifc_reg_info reg_info = {0}; + u16 out_size = sizeof(reg_info); + int err; + + if (!hwdev || !val) + return -EINVAL; + + reg_info.reg_addr = reg_addr; + reg_info.val_length = sizeof(u32); + + err = hifc_pf_msg_to_mgmt_sync(hwdev, HIFC_MOD_COMM, + HIFC_MGMT_CMD_REG_READ, + &reg_info, sizeof(reg_info), + &reg_info, &out_size, + HIFC_RED_REG_TIME_OUT); + if (reg_info.status || err || !out_size) { + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Failed to read reg, err: %d, status: 0x%x, out size: 0x%x\n", + err, reg_info.status, out_size); + return -EFAULT; + } + + *val = reg_info.data[0]; + + return 0; +} + +void hifc_swe_fault_handler(struct hifc_hwdev *hwdev, u8 level, + u8 event, u64 val) +{ + struct hifc_fault_info_node *fault_node; + + if (level < FAULT_LEVEL_MAX) { + fault_node = kzalloc(sizeof(*fault_node), GFP_KERNEL); + if (!fault_node) { + sdk_err(hwdev->dev_hdl, "Malloc fault node memory failed\n"); + return; + } + + fault_node->info.fault_src = HIFC_FAULT_SRC_SW_MGMT_UCODE; + fault_node->info.fault_lev = level; + fault_node->info.fault_data.sw_mgmt.event_id = event; + fault_node->info.fault_data.sw_mgmt.event_data = val; + hifc_refresh_history_fault(hwdev, &fault_node->info); + + down(&hwdev->fault_list_sem); + kfree(fault_node); + 
up(&hwdev->fault_list_sem); + } +} + +void hifc_set_func_deinit_flag(void *hwdev) +{ + struct hifc_hwdev *dev = hwdev; + + set_bit(HIFC_HWDEV_FUNC_DEINIT, &dev->func_state); +} + +int hifc_get_card_present_state(void *hwdev, bool *card_present_state) +{ + u32 addr, attr1; + + if (!hwdev || !card_present_state) + return -EINVAL; + + addr = HIFC_CSR_FUNC_ATTR1_ADDR; + attr1 = hifc_hwif_read_reg(((struct hifc_hwdev *)hwdev)->hwif, addr); + if (attr1 == HIFC_PCIE_LINK_DOWN) { + sdk_warn(((struct hifc_hwdev *)hwdev)->dev_hdl, "Card is not present\n"); + *card_present_state = (bool)0; + } else { + *card_present_state = (bool)1; + } + + return 0; +} + +void hifc_disable_mgmt_msg_report(void *hwdev) +{ + struct hifc_hwdev *hw_dev = (struct hifc_hwdev *)hwdev; + + hifc_set_pf_status(hw_dev->hwif, HIFC_PF_STATUS_INIT); +} + diff --git a/drivers/scsi/huawei/hifc/hifc_hwdev.h b/drivers/scsi/huawei/hifc/hifc_hwdev.h new file mode 100644 index 000000000000..6ebf59b31fb8 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_hwdev.h @@ -0,0 +1,456 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef HIFC_HWDEV_H_ +#define HIFC_HWDEV_H_ + +/* to use 0-level CLA, page size must be: 64B(wqebb) * 4096(max_q_depth) */ +#define HIFC_DEFAULT_WQ_PAGE_SIZE 0x40000 +#define HIFC_HW_WQ_PAGE_SIZE 0x1000 + +#define HIFC_MSG_TO_MGMT_MAX_LEN 2016 + +#define HIFC_MGMT_STATUS_ERR_OK 0 /* Ok */ +#define HIFC_MGMT_STATUS_ERR_PARAM 1 /* Invalid parameter */ +#define HIFC_MGMT_STATUS_ERR_FAILED 2 /* Operation failed */ +#define HIFC_MGMT_STATUS_ERR_PORT 3 /* Invalid port */ +#define HIFC_MGMT_STATUS_ERR_TIMEOUT 4 /* Operation time out */ +#define HIFC_MGMT_STATUS_ERR_NOMATCH 5 /* Version not match */ +#define HIFC_MGMT_STATUS_ERR_EXIST 6 /* Entry exists */ +#define HIFC_MGMT_STATUS_ERR_NOMEM 7 /* Out of memory */ +#define HIFC_MGMT_STATUS_ERR_INIT 8 /* Feature not initialized */ +#define HIFC_MGMT_STATUS_ERR_FAULT 9 /* Invalid address */ +#define HIFC_MGMT_STATUS_ERR_PERM 10 /* Operation not permitted */ +#define HIFC_MGMT_STATUS_ERR_EMPTY 11 /* Table empty */ +#define HIFC_MGMT_STATUS_ERR_FULL 12 /* Table full */ +#define HIFC_MGMT_STATUS_ERR_NOT_FOUND 13 /* Not found */ +#define HIFC_MGMT_STATUS_ERR_BUSY 14 /* Device or resource busy */ +#define HIFC_MGMT_STATUS_ERR_RESOURCE 15 /* No resources for operation */ +#define HIFC_MGMT_STATUS_ERR_CONFIG 16 /* Invalid configuration */ +#define HIFC_MGMT_STATUS_ERR_UNAVAIL 17 /* Feature unavailable */ +#define HIFC_MGMT_STATUS_ERR_CRC 18 /* CRC check failed */ +#define HIFC_MGMT_STATUS_ERR_NXIO 19 /* No such device or address */ +#define HIFC_MGMT_STATUS_ERR_ROLLBACK 20 /* Chip rollback fail */ +#define HIFC_MGMT_STATUS_ERR_LEN 32 /* Length too short or too long */ +#define HIFC_MGMT_STATUS_ERR_UNSUPPORT 0xFF /* Feature not supported*/ +/* Qe buffer relates define */ + +enum hifc_rx_buf_size { + HIFC_RX_BUF_SIZE_32B = 0x20, + HIFC_RX_BUF_SIZE_64B = 0x40, + HIFC_RX_BUF_SIZE_96B = 0x60, + HIFC_RX_BUF_SIZE_128B = 0x80, + HIFC_RX_BUF_SIZE_192B = 0xC0, + HIFC_RX_BUF_SIZE_256B = 0x100, + HIFC_RX_BUF_SIZE_384B = 0x180, + HIFC_RX_BUF_SIZE_512B = 0x200, + HIFC_RX_BUF_SIZE_768B = 0x300, + HIFC_RX_BUF_SIZE_1K = 0x400, + HIFC_RX_BUF_SIZE_1_5K = 0x600, + HIFC_RX_BUF_SIZE_2K = 0x800, + HIFC_RX_BUF_SIZE_3K = 0xC00, + HIFC_RX_BUF_SIZE_4K = 0x1000, + HIFC_RX_BUF_SIZE_8K = 0x2000, + HIFC_RX_BUF_SIZE_16K = 0x4000, +}; + +enum hifc_res_state { + HIFC_RES_CLEAN = 0, + HIFC_RES_ACTIVE = 1, +}; + +enum 
ppf_tmr_status { + HIFC_PPF_TMR_FLAG_STOP, + HIFC_PPF_TMR_FLAG_START, +}; + +struct cfg_mgmt_info; +struct hifc_hwif; +struct hifc_wqs; +struct hifc_aeqs; +struct hifc_ceqs; +struct hifc_msg_pf_to_mgmt; +struct hifc_cmdqs; + +struct hifc_root_ctxt { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u16 rsvd1; + u8 set_cmdq_depth; + u8 cmdq_depth; + u8 lro_en; + u8 rsvd2; + u8 ppf_idx; + u8 rsvd3; + u16 rq_depth; + u16 rx_buf_sz; + u16 sq_depth; +}; + +struct hifc_page_addr { + void *virt_addr; + u64 phys_addr; +}; + +#define HIFC_PCIE_LINK_DOWN 0xFFFFFFFF + +#define HIFC_DEV_ACTIVE_FW_TIMEOUT (35 * 1000) +#define HIFC_DEV_BUSY_ACTIVE_FW 0xFE + +#define HIFC_HW_WQ_NAME "hifc_hardware" +#define HIFC_HEARTBEAT_PERIOD 1000 +#define HIFC_HEARTBEAT_START_EXPIRE 5000 + +#define HIFC_CHIP_ERROR_TYPE_MAX 1024 +#define HIFC_CHIP_FAULT_SIZE \ + (HIFC_NODE_ID_MAX * FAULT_LEVEL_MAX * HIFC_CHIP_ERROR_TYPE_MAX) + +#define HIFC_CSR_DMA_ATTR_TBL_BASE 0xC80 +#define HIFC_CSR_DMA_ATTR_TBL_STRIDE 0x4 +#define HIFC_CSR_DMA_ATTR_TBL_ADDR(idx) \ + (HIFC_CSR_DMA_ATTR_TBL_BASE \ + + (idx) * HIFC_CSR_DMA_ATTR_TBL_STRIDE) + +/* MSI-X registers */ +#define HIFC_CSR_MSIX_CNT_BASE 0x2004 +#define HIFC_CSR_MSIX_STRIDE 0x8 + +#define HIFC_CSR_MSIX_CNT_ADDR(idx) \ + (HIFC_CSR_MSIX_CNT_BASE + (idx) * HIFC_CSR_MSIX_STRIDE) + +enum hifc_node_id { + HIFC_NODE_ID_IPSU = 4, + HIFC_NODE_ID_MGMT_HOST = 21, /*Host CPU send API to uP */ + HIFC_NODE_ID_MAX = 22 +}; + +#define HIFC_HWDEV_INIT_MODES_MASK ((1UL << HIFC_HWDEV_ALL_INITED) - 1) + +enum hifc_hwdev_func_state { + HIFC_HWDEV_FUNC_INITED = HIFC_HWDEV_ALL_INITED, + HIFC_HWDEV_FUNC_DEINIT, + HIFC_HWDEV_STATE_BUSY = 31, +}; + +struct hifc_cqm_stats { + atomic_t cqm_cmd_alloc_cnt; + atomic_t cqm_cmd_free_cnt; + atomic_t cqm_send_cmd_box_cnt; + atomic_t cqm_db_addr_alloc_cnt; + atomic_t cqm_db_addr_free_cnt; + atomic_t cqm_fc_srq_create_cnt; + atomic_t cqm_qpc_mpt_create_cnt; + atomic_t cqm_nonrdma_queue_create_cnt; + atomic_t cqm_qpc_mpt_delete_cnt; + atomic_t cqm_nonrdma_queue_delete_cnt; + atomic_t cqm_aeq_callback_cnt[112]; +}; + +struct hifc_link_event_stats { + atomic_t link_down_stats; + atomic_t link_up_stats; +}; + +struct hifc_fault_event_stats { + atomic_t chip_fault_stats[HIFC_NODE_ID_MAX][FAULT_LEVEL_MAX]; + atomic_t fault_type_stat[FAULT_TYPE_MAX]; + atomic_t pcie_fault_stats; +}; + +struct hifc_hw_stats { + atomic_t heart_lost_stats; + atomic_t nic_ucode_event_stats[HIFC_NIC_FATAL_ERROR_MAX]; + struct hifc_cqm_stats cqm_stats; + struct hifc_link_event_stats link_event_stats; + struct hifc_fault_event_stats fault_event_stats; +}; + +struct hifc_fault_info_node { + struct list_head list; + struct hifc_hwdev *hwdev; + struct hifc_fault_recover_info info; +}; + +enum heartbeat_support_state { + HEARTBEAT_NOT_SUPPORT = 0, + HEARTBEAT_SUPPORT, +}; + +/* 25s for max 5 heartbeat event lost */ +#define HIFC_HEARBEAT_ENHANCED_LOST 25000 +struct hifc_heartbeat_enhanced { + bool en; /* enable enhanced heartbeat or not */ + + unsigned long last_update_jiffies; + u32 last_heartbeat; + + unsigned long start_detect_jiffies; +}; + +#define HIFC_CMD_VER_FUNC_ID 2 +#define HIFC_GLB_DMA_SO_RO_REPLACE_ADDR 0x488C +#define HIFC_ICPL_RESERVD_ADDR 0x9204 + +#define l2nic_msg_to_mgmt_sync(hwdev, cmd, buf_in, in_size, buf_out, out_size)\ + hifc_msg_to_mgmt_sync(hwdev, HIFC_MOD_L2NIC, cmd, \ + buf_in, in_size, \ + buf_out, out_size, 0) + +struct hifc_hwdev { + void *adapter_hdl; /* pointer to hifc_pcidev or NDIS_Adapter */ + void *pcidev_hdl; /* pointer to pcidev or Handler */ + 
void *dev_hdl; /* pointer to pcidev->dev or Handler, for + * sdk_err() or dma_alloc() + */ + u32 wq_page_size; + + void *cqm_hdl; + void *chip_node; + + struct hifc_hwif *hwif; /* include void __iomem *bar */ + struct cfg_mgmt_info *cfg_mgmt; + struct hifc_wqs *wqs; /* for FC slq */ + + struct hifc_aeqs *aeqs; + struct hifc_ceqs *ceqs; + + struct hifc_msg_pf_to_mgmt *pf_to_mgmt; + struct hifc_clp_pf_to_mgmt *clp_pf_to_mgmt; + + struct hifc_cmdqs *cmdqs; + + struct hifc_page_addr page_pa0; + struct hifc_page_addr page_pa1; + + hifc_event_handler event_callback; + void *event_pri_handle; + bool history_fault_flag; + struct hifc_fault_recover_info history_fault; + struct semaphore fault_list_sem; + + struct work_struct timer_work; + struct workqueue_struct *workq; + struct timer_list heartbeat_timer; + /* true represent heartbeat lost, false represent heartbeat restore */ + u32 heartbeat_lost; + int chip_present_flag; + struct hifc_heartbeat_enhanced heartbeat_ehd; + struct hifc_hw_stats hw_stats; + u8 *chip_fault_stats; + + u32 statufull_ref_cnt; + ulong func_state; + + u64 feature_cap; /* enum hifc_func_cap */ + + /* In bmgw x86 host, driver can't send message to mgmt cpu directly, + * need to trasmit message ppf mbox to bmgw arm host. + */ + + struct hifc_board_info board_info; +}; + +int hifc_init_comm_ch(struct hifc_hwdev *hwdev); +void hifc_uninit_comm_ch(struct hifc_hwdev *hwdev); + +enum hifc_set_arm_type { + HIFC_SET_ARM_CMDQ, + HIFC_SET_ARM_SQ, + HIFC_SET_ARM_TYPE_NUM, +}; + +/* up to driver event */ +#define HIFC_PORT_CMD_MGMT_RESET 0x0 +struct hifc_vport_state { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; + u8 state; + u8 rsvd2[3]; +}; + +struct hifc_l2nic_reset { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 reset_flag; +}; + +/* HILINK module interface */ + +/* cmd of mgmt CPU message for HILINK module */ +enum hifc_hilink_cmd { + HIFC_HILINK_CMD_GET_LINK_INFO = 0x3, + HIFC_HILINK_CMD_SET_LINK_SETTINGS = 0x8, +}; + +enum hilink_info_print_event { + HILINK_EVENT_LINK_UP = 1, + HILINK_EVENT_LINK_DOWN, + HILINK_EVENT_CABLE_PLUGGED, + HILINK_EVENT_MAX_TYPE, +}; + +enum hifc_link_port_type { + LINK_PORT_FIBRE = 1, + LINK_PORT_ELECTRIC, + LINK_PORT_COPPER, + LINK_PORT_AOC, + LINK_PORT_BACKPLANE, + LINK_PORT_BASET, + LINK_PORT_MAX_TYPE, +}; + +enum hilink_fibre_subtype { + FIBRE_SUBTYPE_SR = 1, + FIBRE_SUBTYPE_LR, + FIBRE_SUBTYPE_MAX, +}; + +enum hilink_fec_type { + HILINK_FEC_RSFEC, + HILINK_FEC_BASEFEC, + HILINK_FEC_NOFEC, + HILINK_FEC_MAX_TYPE, +}; + +/* cmd of mgmt CPU message */ +enum hifc_port_cmd { + HIFC_PORT_CMD_SET_MAC = 0x9, + HIFC_PORT_CMD_GET_AUTONEG_CAP = 0xf, + HIFC_PORT_CMD_SET_VPORT_ENABLE = 0x5d, + HIFC_PORT_CMD_UPDATE_MAC = 0xa4, + HIFC_PORT_CMD_GET_SFP_INFO = 0xad, + HIFC_PORT_CMD_GET_STD_SFP_INFO = 0xF0, + HIFC_PORT_CMD_GET_SFP_ABS = 0xFB, +}; + +struct hi30_ffe_data { + u8 PRE2; + u8 PRE1; + u8 POST1; + u8 POST2; + u8 MAIN; +}; + +struct hi30_ctle_data { + u8 ctlebst[3]; + u8 ctlecmband[3]; + u8 ctlermband[3]; + u8 ctleza[3]; + u8 ctlesqh[3]; + u8 ctleactgn[3]; + u8 ctlepassgn; +}; + +#define HILINK_MAX_LANE 4 + +struct hilink_lane { + u8 lane_used; + u8 hi30_ffe[5]; + u8 hi30_ctle[19]; + u8 hi30_dfe[14]; + u8 rsvd4; +}; + +struct hifc_link_info { + u8 vendor_name[16]; + /* port type: + * 1 - fiber; 2 - electric; 3 - copper; 4 - AOC; 5 - backplane; + * 6 - baseT; 0xffff - unknown + * + * port subtype: + * Only when port_type is fiber: + * 1 - SR; 2 - LR + */ + u32 port_type; + u32 port_sub_type; + u32 cable_length; + 
u8 cable_temp; + u8 cable_max_speed; /* 1(G)/10(G)/25(G)... */ + u8 sfp_type; /* 0 - qsfp; 1 - sfp */ + u8 rsvd0; + u32 power[4]; /* uW; if is sfp, only power[2] is valid */ + + u8 an_state; /* 0 - off; 1 - on */ + u8 fec; /* 0 - RSFEC; 1 - BASEFEC; 2 - NOFEC */ + u16 speed; /* 1(G)/10(G)/25(G)... */ + + u8 cable_absent; /* 0 - cable present; 1 - cable unpresent */ + u8 alos; /* 0 - yes; 1 - no */ + u8 rx_los; /* 0 - yes; 1 - no */ + u8 pma_status; + u32 pma_dbg_info_reg; /* pma debug info: */ + u32 pma_signal_ok_reg; /* signal ok: */ + + u32 pcs_err_blk_cnt_reg; /* error block counter: */ + u32 rf_lf_status_reg; /* RF/LF status: */ + u8 pcs_link_reg; /* pcs link: */ + u8 mac_link_reg; /* mac link: */ + u8 mac_tx_en; + u8 mac_rx_en; + u32 pcs_err_cnt; + + /* struct hifc_hilink_lane: 40 bytes */ + u8 lane1[40]; /* 25GE lane in old firmware */ + + u8 rsvd1[266]; /* hilink machine state */ + + u8 lane2[HILINK_MAX_LANE * 40]; /* max 4 lane for 40GE/100GE */ + + u8 rsvd2[2]; +}; + +struct hifc_hilink_link_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 port_id; + u8 info_type; /* 1: link up 2: link down 3 cable plugged */ + u8 rsvd1; + + struct hifc_link_info info; + + u8 rsvd2[352]; +}; + +int hifc_set_arm_bit(void *hwdev, enum hifc_set_arm_type q_type, u16 q_id); +void hifc_set_chip_present(void *hwdev); +void hifc_force_complete_all(void *hwdev); +void hifc_init_heartbeat(struct hifc_hwdev *hwdev); +void hifc_destroy_heartbeat(struct hifc_hwdev *hwdev); +u8 hifc_nic_sw_aeqe_handler(void *handle, u8 event, u64 data); +int hifc_l2nic_reset_base(struct hifc_hwdev *hwdev, u16 reset_flag); +int hifc_pf_msg_to_mgmt_sync(void *hwdev, enum hifc_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout); +void hifc_swe_fault_handler(struct hifc_hwdev *hwdev, u8 level, + u8 event, u64 val); +bool hifc_mgmt_event_ack_first(u8 mod, u8 cmd); +int hifc_phy_init_status_judge(void *hwdev); +int hifc_api_csr_rd32(void *hwdev, u8 dest, u32 addr, u32 *val); +int hifc_api_csr_wr32(void *hwdev, u8 dest, u32 addr, u32 val); +void mgmt_heartbeat_event_handler(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); +struct hifc_sge { + u32 hi_addr; + u32 lo_addr; + u32 len; +}; + +void hifc_cpu_to_be32(void *data, int len); +void hifc_be32_to_cpu(void *data, int len); +void hifc_set_sge(struct hifc_sge *sge, dma_addr_t addr, u32 len); +#endif diff --git a/drivers/scsi/huawei/hifc/hifc_hwif.c b/drivers/scsi/huawei/hifc/hifc_hwif.c new file mode 100644 index 000000000000..ec84c9bc2f2f --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_hwif.c @@ -0,0 +1,630 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/io-mapping.h> + +#include "hifc_knl_adp.h" +#include "hifc_hw.h" +#include "hifc_hwdev.h" +#include "hifc_hwif.h" +#include "hifc_api_cmd.h" +#include "hifc_mgmt.h" +#include "hifc_eqs.h" + +#define WAIT_HWIF_READY_TIMEOUT 10000 +#define HIFC_SELFTEST_RESULT 0x883C + +u32 hifc_hwif_read_reg(struct hifc_hwif *hwif, u32 reg) +{ + return be32_to_cpu(readl(hwif->cfg_regs_base + reg)); +} + +void hifc_hwif_write_reg(struct hifc_hwif *hwif, u32 reg, u32 val) +{ + writel(cpu_to_be32(val), hwif->cfg_regs_base + reg); +} + +/** + * hwif_ready - test if the HW initialization passed + * @hwdev: the pointer 
to hw device + * Return: 0 - success, negative - failure + **/ +static int hwif_ready(struct hifc_hwdev *hwdev) +{ + u32 addr, attr1; + + addr = HIFC_CSR_FUNC_ATTR1_ADDR; + attr1 = hifc_hwif_read_reg(hwdev->hwif, addr); + + if (attr1 == HIFC_PCIE_LINK_DOWN) + return -EBUSY; + + if (!HIFC_AF1_GET(attr1, MGMT_INIT_STATUS)) + return -EBUSY; + + return 0; +} + +static int wait_hwif_ready(struct hifc_hwdev *hwdev) +{ + ulong timeout = 0; + + do { + if (!hwif_ready(hwdev)) + return 0; + + usleep_range(999, 1000); + timeout++; + } while (timeout <= WAIT_HWIF_READY_TIMEOUT); + + sdk_err(hwdev->dev_hdl, "Wait for hwif timeout\n"); + return -EBUSY; +} + +/** + * set_hwif_attr - set the attributes as members in hwif + * @hwif: the hardware interface of a pci function device + * @attr0: the first attribute that was read from the hw + * @attr1: the second attribute that was read from the hw + * @attr2: the third attribute that was read from the hw + **/ +static void set_hwif_attr(struct hifc_hwif *hwif, u32 attr0, u32 attr1, + u32 attr2) +{ + hwif->attr.func_global_idx = HIFC_AF0_GET(attr0, FUNC_GLOBAL_IDX); + hwif->attr.port_to_port_idx = HIFC_AF0_GET(attr0, P2P_IDX); + hwif->attr.pci_intf_idx = HIFC_AF0_GET(attr0, PCI_INTF_IDX); + hwif->attr.vf_in_pf = HIFC_AF0_GET(attr0, VF_IN_PF); + hwif->attr.func_type = HIFC_AF0_GET(attr0, FUNC_TYPE); + + hwif->attr.ppf_idx = HIFC_AF1_GET(attr1, PPF_IDX); + + hwif->attr.num_aeqs = BIT(HIFC_AF1_GET(attr1, AEQS_PER_FUNC)); + hwif->attr.num_ceqs = BIT(HIFC_AF1_GET(attr1, CEQS_PER_FUNC)); + hwif->attr.num_irqs = BIT(HIFC_AF1_GET(attr1, IRQS_PER_FUNC)); + hwif->attr.num_dma_attr = BIT(HIFC_AF1_GET(attr1, DMA_ATTR_PER_FUNC)); +} + +/** + * get_hwif_attr - read and set the attributes as members in hwif + * @hwif: the hardware interface of a pci function device + **/ +static void get_hwif_attr(struct hifc_hwif *hwif) +{ + u32 addr, attr0, attr1, attr2; + + addr = HIFC_CSR_FUNC_ATTR0_ADDR; + attr0 = hifc_hwif_read_reg(hwif, addr); + + addr = HIFC_CSR_FUNC_ATTR1_ADDR; + attr1 = hifc_hwif_read_reg(hwif, addr); + + addr = HIFC_CSR_FUNC_ATTR2_ADDR; + attr2 = hifc_hwif_read_reg(hwif, addr); + + set_hwif_attr(hwif, attr0, attr1, attr2); +} + +void hifc_set_pf_status(struct hifc_hwif *hwif, enum hifc_pf_status status) +{ + u32 attr5 = HIFC_AF5_SET(status, PF_STATUS); + u32 addr = HIFC_CSR_FUNC_ATTR5_ADDR; + + hifc_hwif_write_reg(hwif, addr, attr5); +} + +enum hifc_pf_status hifc_get_pf_status(struct hifc_hwif *hwif) +{ + u32 attr5 = hifc_hwif_read_reg(hwif, HIFC_CSR_FUNC_ATTR5_ADDR); + + return HIFC_AF5_GET(attr5, PF_STATUS); +} + +enum hifc_doorbell_ctrl hifc_get_doorbell_ctrl_status(struct hifc_hwif *hwif) +{ + u32 attr4 = hifc_hwif_read_reg(hwif, HIFC_CSR_FUNC_ATTR4_ADDR); + + return HIFC_AF4_GET(attr4, DOORBELL_CTRL); +} + +enum hifc_outbound_ctrl hifc_get_outbound_ctrl_status(struct hifc_hwif *hwif) +{ + u32 attr4 = hifc_hwif_read_reg(hwif, HIFC_CSR_FUNC_ATTR4_ADDR); + + return HIFC_AF4_GET(attr4, OUTBOUND_CTRL); +} + +void hifc_enable_doorbell(struct hifc_hwif *hwif) +{ + u32 addr, attr4; + + addr = HIFC_CSR_FUNC_ATTR4_ADDR; + attr4 = hifc_hwif_read_reg(hwif, addr); + + attr4 = HIFC_AF4_CLEAR(attr4, DOORBELL_CTRL); + attr4 |= HIFC_AF4_SET(ENABLE_DOORBELL, DOORBELL_CTRL); + + hifc_hwif_write_reg(hwif, addr, attr4); +} + +void hifc_disable_doorbell(struct hifc_hwif *hwif) +{ + u32 addr, attr4; + + addr = HIFC_CSR_FUNC_ATTR4_ADDR; + attr4 = hifc_hwif_read_reg(hwif, addr); + + attr4 = HIFC_AF4_CLEAR(attr4, DOORBELL_CTRL); + attr4 |= HIFC_AF4_SET(DISABLE_DOORBELL, 
DOORBELL_CTRL); + + hifc_hwif_write_reg(hwif, addr, attr4); +} + +/** + * set_ppf - try to set hwif as ppf and set the type of hwif in this case + * @hwif: the hardware interface of a pci function device + **/ +static void set_ppf(struct hifc_hwif *hwif) +{ + struct hifc_func_attr *attr = &hwif->attr; + u32 addr, val, ppf_election; + + /* Read Modify Write */ + addr = HIFC_CSR_PPF_ELECTION_ADDR; + + val = hifc_hwif_read_reg(hwif, addr); + val = HIFC_PPF_ELECTION_CLEAR(val, IDX); + + ppf_election = HIFC_PPF_ELECTION_SET(attr->func_global_idx, IDX); + val |= ppf_election; + + hifc_hwif_write_reg(hwif, addr, val); + + /* Check PPF */ + val = hifc_hwif_read_reg(hwif, addr); + + attr->ppf_idx = HIFC_PPF_ELECTION_GET(val, IDX); + if (attr->ppf_idx == attr->func_global_idx) + attr->func_type = TYPE_PPF; +} + +/** + * get_mpf - get the mpf index into the hwif + * @hwif: the hardware interface of a pci function device + **/ +static void get_mpf(struct hifc_hwif *hwif) +{ + struct hifc_func_attr *attr = &hwif->attr; + u32 mpf_election, addr; + + addr = HIFC_CSR_GLOBAL_MPF_ELECTION_ADDR; + + mpf_election = hifc_hwif_read_reg(hwif, addr); + attr->mpf_idx = HIFC_MPF_ELECTION_GET(mpf_election, IDX); +} + +/** + * set_mpf - try to set hwif as mpf and set the mpf idx in hwif + * @hwif: the hardware interface of a pci function device + **/ +static void set_mpf(struct hifc_hwif *hwif) +{ + struct hifc_func_attr *attr = &hwif->attr; + u32 addr, val, mpf_election; + + /* Read Modify Write */ + addr = HIFC_CSR_GLOBAL_MPF_ELECTION_ADDR; + + val = hifc_hwif_read_reg(hwif, addr); + + val = HIFC_MPF_ELECTION_CLEAR(val, IDX); + mpf_election = HIFC_MPF_ELECTION_SET(attr->func_global_idx, IDX); + + val |= mpf_election; + hifc_hwif_write_reg(hwif, addr, val); +} + +static void init_db_area_idx(struct hifc_free_db_area *free_db_area) +{ + u32 i; + + for (i = 0; i < HIFC_DB_MAX_AREAS; i++) + free_db_area->db_idx[i] = i; + + free_db_area->num_free = HIFC_DB_MAX_AREAS; + + spin_lock_init(&free_db_area->idx_lock); +} + +static int get_db_idx(struct hifc_hwif *hwif, u32 *idx) +{ + struct hifc_free_db_area *free_db_area = &hwif->free_db_area; + u32 pos; + u32 pg_idx; + + spin_lock(&free_db_area->idx_lock); + +retry: + if (free_db_area->num_free == 0) { + spin_unlock(&free_db_area->idx_lock); + return -ENOMEM; + } + + free_db_area->num_free--; + + pos = free_db_area->alloc_pos++; + pos &= HIFC_DB_MAX_AREAS - 1; + + pg_idx = free_db_area->db_idx[pos]; + + free_db_area->db_idx[pos] = 0xFFFFFFFF; + + /* pg_idx out of range */ + if (pg_idx >= HIFC_DB_MAX_AREAS) + goto retry; + + spin_unlock(&free_db_area->idx_lock); + + *idx = pg_idx; + + return 0; +} + +static void free_db_idx(struct hifc_hwif *hwif, u32 idx) +{ + struct hifc_free_db_area *free_db_area = &hwif->free_db_area; + u32 pos; + + if (idx >= HIFC_DB_MAX_AREAS) + return; + + spin_lock(&free_db_area->idx_lock); + + pos = free_db_area->return_pos++; + pos &= HIFC_DB_MAX_AREAS - 1; + + free_db_area->db_idx[pos] = idx; + + free_db_area->num_free++; + + spin_unlock(&free_db_area->idx_lock); +} + +void hifc_free_db_addr(void *hwdev, void __iomem *db_base, + void __iomem *dwqe_base) +{ + struct hifc_hwif *hwif; + u32 idx; + + if (!hwdev || !db_base) + return; + + hwif = ((struct hifc_hwdev *)hwdev)->hwif; + idx = DB_IDX(db_base, hwif->db_base); + +#if defined(__aarch64__) + /* No need to unmap */ +#else + if (dwqe_base) + io_mapping_unmap(dwqe_base); +#endif + + free_db_idx(hwif, idx); +} + +int hifc_alloc_db_addr(void *hwdev, void __iomem **db_base, + void __iomem **dwqe_base) 
+{ + struct hifc_hwif *hwif; + u64 offset; + u32 idx; + int err; + + if (!hwdev || !db_base) + return -EINVAL; + + hwif = ((struct hifc_hwdev *)hwdev)->hwif; + + err = get_db_idx(hwif, &idx); + if (err) + return -EFAULT; + + *db_base = hwif->db_base + idx * HIFC_DB_PAGE_SIZE; + + if (!dwqe_base) + return 0; + + offset = ((u64)idx) << PAGE_SHIFT; + +#if defined(__aarch64__) + *dwqe_base = hwif->dwqe_mapping + offset; +#else + *dwqe_base = io_mapping_map_wc(hwif->dwqe_mapping, offset, + HIFC_DB_PAGE_SIZE); +#endif + + if (!(*dwqe_base)) { + hifc_free_db_addr(hwdev, *db_base, NULL); + return -EFAULT; + } + + return 0; +} + +void hifc_set_msix_state(void *hwdev, u16 msix_idx, enum hifc_msix_state flag) +{ + struct hifc_hwif *hwif; + u32 offset = msix_idx * HIFC_PCI_MSIX_ENTRY_SIZE + + HIFC_PCI_MSIX_ENTRY_VECTOR_CTRL; + u32 mask_bits; + + if (!hwdev) + return; + + hwif = ((struct hifc_hwdev *)hwdev)->hwif; + + mask_bits = readl(hwif->intr_regs_base + offset); + mask_bits &= ~HIFC_PCI_MSIX_ENTRY_CTRL_MASKBIT; + if (flag) + mask_bits |= HIFC_PCI_MSIX_ENTRY_CTRL_MASKBIT; + + writel(mask_bits, hwif->intr_regs_base + offset); +} + +static void disable_all_msix(struct hifc_hwdev *hwdev) +{ + u16 num_irqs = hwdev->hwif->attr.num_irqs; + u16 i; + + for (i = 0; i < num_irqs; i++) + hifc_set_msix_state(hwdev, i, HIFC_MSIX_DISABLE); +} + +static int wait_until_doorbell_and_outbound_enabled(struct hifc_hwif *hwif) +{ + enum hifc_doorbell_ctrl db_ctrl; + enum hifc_outbound_ctrl outbound_ctrl; + u32 cnt = 0; + + while (cnt < HIFC_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT) { + db_ctrl = hifc_get_doorbell_ctrl_status(hwif); + outbound_ctrl = hifc_get_outbound_ctrl_status(hwif); + + if (outbound_ctrl == ENABLE_OUTBOUND && + db_ctrl == ENABLE_DOORBELL) + return 0; + + usleep_range(900, 1000); + cnt++; + } + + return -EFAULT; +} + +static void __print_selftest_reg(struct hifc_hwdev *hwdev) +{ + u32 addr, attr0, attr1; + + addr = HIFC_CSR_FUNC_ATTR1_ADDR; + attr1 = hifc_hwif_read_reg(hwdev->hwif, addr); + + if (attr1 == HIFC_PCIE_LINK_DOWN) { + sdk_err(hwdev->dev_hdl, "PCIE is link down\n"); + return; + } + + addr = HIFC_CSR_FUNC_ATTR0_ADDR; + attr0 = hifc_hwif_read_reg(hwdev->hwif, addr); + if (HIFC_AF0_GET(attr0, FUNC_TYPE) != TYPE_VF && + !HIFC_AF0_GET(attr0, PCI_INTF_IDX)) + sdk_err(hwdev->dev_hdl, "Selftest reg: 0x%08x\n", + hifc_hwif_read_reg(hwdev->hwif, + HIFC_SELFTEST_RESULT)); +} + +/** + * hifc_init_hwif - initialize the hw interface + * @hwdev: the pointer to hw device + * @cfg_reg_base: configuration base address + * Return: 0 - success, negative - failure + **/ +int hifc_init_hwif(struct hifc_hwdev *hwdev, void *cfg_reg_base, + void *intr_reg_base, u64 db_base_phy, + void *db_base, void *dwqe_mapping) +{ + struct hifc_hwif *hwif; + int err; + + hwif = kzalloc(sizeof(*hwif), GFP_KERNEL); + if (!hwif) + return -ENOMEM; + + hwdev->hwif = hwif; + hwif->pdev = hwdev->pcidev_hdl; + + hwif->cfg_regs_base = cfg_reg_base; + hwif->intr_regs_base = intr_reg_base; + + hwif->db_base_phy = db_base_phy; + hwif->db_base = db_base; + hwif->dwqe_mapping = dwqe_mapping; + init_db_area_idx(&hwif->free_db_area); + + err = wait_hwif_ready(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Chip status is not ready\n"); + __print_selftest_reg(hwdev); + goto hwif_ready_err; + } + + get_hwif_attr(hwif); + + err = wait_until_doorbell_and_outbound_enabled(hwif); + if (err) { + sdk_err(hwdev->dev_hdl, "Hw doorbell/outbound is disabled\n"); + goto hwif_ready_err; + } + + set_ppf(hwif); + + if (HIFC_IS_PPF(hwdev)) + set_mpf(hwif); + + 
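+	/* Read the MPF election register back unconditionally so that
+	 * attr.mpf_idx reflects the elected master PF, whichever function
+	 * won the election.
+	 */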
get_mpf(hwif); + + disable_all_msix(hwdev); + /* disable mgmt cpu report any event */ + hifc_set_pf_status(hwdev->hwif, HIFC_PF_STATUS_INIT); + + pr_info("global_func_idx: %d, func_type: %d, host_id: %d, ppf: %d, mpf: %d\n", + hwif->attr.func_global_idx, hwif->attr.func_type, + hwif->attr.pci_intf_idx, hwif->attr.ppf_idx, + hwif->attr.mpf_idx); + + return 0; + +hwif_ready_err: + kfree(hwif); + + return err; +} + +/** + * hifc_free_hwif - free the hw interface + * @hwdev: the pointer to hw device + **/ +void hifc_free_hwif(struct hifc_hwdev *hwdev) +{ + kfree(hwdev->hwif); +} + +int hifc_dma_zalloc_coherent_align(void *dev_hdl, u64 size, u64 align, + unsigned flag, + struct hifc_dma_addr_align *mem_align) +{ + void *vaddr, *align_vaddr; + dma_addr_t paddr, align_paddr; + u64 real_size = size; + + vaddr = dma_zalloc_coherent(dev_hdl, real_size, &paddr, flag); + if (!vaddr) + return -ENOMEM; + + align_paddr = ALIGN(paddr, align); + /* align */ + if (align_paddr == paddr) { + align_vaddr = vaddr; + goto out; + } + + dma_free_coherent(dev_hdl, real_size, vaddr, paddr); + + /* realloc memory for align */ + real_size = size + align; + vaddr = dma_zalloc_coherent(dev_hdl, real_size, &paddr, flag); + if (!vaddr) + return -ENOMEM; + + align_paddr = ALIGN(paddr, align); + align_vaddr = (void *)((u64)vaddr + (align_paddr - paddr)); + +out: + mem_align->real_size = (u32)real_size; + mem_align->ori_vaddr = vaddr; + mem_align->ori_paddr = paddr; + mem_align->align_vaddr = align_vaddr; + mem_align->align_paddr = align_paddr; + + return 0; +} + +void hifc_dma_free_coherent_align(void *dev_hdl, + struct hifc_dma_addr_align *mem_align) +{ + dma_free_coherent(dev_hdl, mem_align->real_size, + mem_align->ori_vaddr, mem_align->ori_paddr); +} + +u16 hifc_global_func_id(void *hwdev) +{ + struct hifc_hwif *hwif; + + if (!hwdev) + return 0; + + hwif = ((struct hifc_hwdev *)hwdev)->hwif; + + return hwif->attr.func_global_idx; +} + +/** + * get function id from register,used by sriov hot migration process + * @hwdev: the pointer to hw device + **/ +u16 hifc_global_func_id_hw(void *hwdev) +{ + u32 addr, attr0; + struct hifc_hwdev *dev; + + dev = (struct hifc_hwdev *)hwdev; + addr = HIFC_CSR_FUNC_ATTR0_ADDR; + attr0 = hifc_hwif_read_reg(dev->hwif, addr); + + return HIFC_AF0_GET(attr0, FUNC_GLOBAL_IDX); +} + +/** + * get function id, used by sriov hot migratition process. 
+ * @hwdev: the pointer to hw device + * @func_id: function id + **/ +int hifc_global_func_id_get(void *hwdev, u16 *func_id) +{ + *func_id = hifc_global_func_id(hwdev); + return 0; +} + +u8 hifc_pcie_itf_id(void *hwdev) +{ + struct hifc_hwif *hwif; + + if (!hwdev) + return 0; + + hwif = ((struct hifc_hwdev *)hwdev)->hwif; + + return hwif->attr.pci_intf_idx; +} +EXPORT_SYMBOL(hifc_pcie_itf_id); + +enum func_type hifc_func_type(void *hwdev) +{ + struct hifc_hwif *hwif; + + if (!hwdev) + return 0; + + hwif = ((struct hifc_hwdev *)hwdev)->hwif; + + return hwif->attr.func_type; +} + +u8 hifc_ppf_idx(void *hwdev) +{ + struct hifc_hwif *hwif; + + if (!hwdev) + return 0; + + hwif = ((struct hifc_hwdev *)hwdev)->hwif; + + return hwif->attr.ppf_idx; +} diff --git a/drivers/scsi/huawei/hifc/hifc_hwif.h b/drivers/scsi/huawei/hifc/hifc_hwif.h new file mode 100644 index 000000000000..da72253dcf5f --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_hwif.h @@ -0,0 +1,243 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef HIFC_HWIF_H +#define HIFC_HWIF_H + +#include "hifc_hwdev.h" + +#define HIFC_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT 60000 +#define HIFC_CSR_GLOBAL_BASE_ADDR 0x4000 +/* HW interface registers */ +#define HIFC_CSR_FUNC_ATTR0_ADDR 0x0 +#define HIFC_CSR_FUNC_ATTR1_ADDR 0x4 +#define HIFC_CSR_FUNC_ATTR2_ADDR 0x8 +#define HIFC_CSR_FUNC_ATTR4_ADDR 0x10 + +#define HIFC_CSR_FUNC_ATTR5_ADDR 0x14 +#define HIFC_PCI_MSIX_ENTRY_SIZE 16 +#define HIFC_PCI_MSIX_ENTRY_VECTOR_CTRL 12 +#define HIFC_PCI_MSIX_ENTRY_CTRL_MASKBIT 1 + +/* total doorbell or direct wqe size is 512kB, db num: 128, dwqe: 128*/ +#define HIFC_DB_DWQE_SIZE 0x00080000 +/* db/dwqe page size: 4K */ +#define HIFC_DB_PAGE_SIZE 0x00001000ULL +#define HIFC_DB_MAX_AREAS (HIFC_DB_DWQE_SIZE / HIFC_DB_PAGE_SIZE) + +#define HIFC_ELECTION_BASE 0x200 +#define HIFC_PPF_ELECTION_STRIDE 0x4 +#define HIFC_CSR_MAX_PORTS 4 +#define HIFC_CSR_PPF_ELECTION_ADDR \ + (HIFC_CSR_GLOBAL_BASE_ADDR + HIFC_ELECTION_BASE) + +#define HIFC_CSR_GLOBAL_MPF_ELECTION_ADDR \ + (HIFC_CSR_GLOBAL_BASE_ADDR + HIFC_ELECTION_BASE + \ + HIFC_CSR_MAX_PORTS * HIFC_PPF_ELECTION_STRIDE) +#define DB_IDX(db, db_base) \ + ((u32)(((ulong)(db) - (ulong)(db_base)) / \ + HIFC_DB_PAGE_SIZE)) + +#define HIFC_AF0_FUNC_GLOBAL_IDX_SHIFT 0 +#define HIFC_AF0_P2P_IDX_SHIFT 10 +#define HIFC_AF0_PCI_INTF_IDX_SHIFT 14 +#define HIFC_AF0_VF_IN_PF_SHIFT 16 +#define HIFC_AF0_FUNC_TYPE_SHIFT 24 +#define HIFC_AF0_FUNC_GLOBAL_IDX_MASK 0x3FF +#define HIFC_AF0_P2P_IDX_MASK 0xF +#define HIFC_AF0_PCI_INTF_IDX_MASK 0x3 +#define HIFC_AF0_VF_IN_PF_MASK 0xFF +#define HIFC_AF0_FUNC_TYPE_MASK 0x1 + +#define HIFC_AF0_GET(val, member) \ + (((val) >> HIFC_AF0_##member##_SHIFT) & HIFC_AF0_##member##_MASK) + +#define HIFC_AF1_PPF_IDX_SHIFT 0 +#define HIFC_AF1_AEQS_PER_FUNC_SHIFT 8 +#define HIFC_AF1_CEQS_PER_FUNC_SHIFT 12 +#define HIFC_AF1_IRQS_PER_FUNC_SHIFT 20 +#define HIFC_AF1_DMA_ATTR_PER_FUNC_SHIFT 24 +#define HIFC_AF1_MGMT_INIT_STATUS_SHIFT 30 +#define HIFC_AF1_PF_INIT_STATUS_SHIFT 31 + +#define HIFC_AF1_PPF_IDX_MASK 0x1F +#define HIFC_AF1_AEQS_PER_FUNC_MASK 0x3 +#define HIFC_AF1_CEQS_PER_FUNC_MASK 0x7 +#define HIFC_AF1_IRQS_PER_FUNC_MASK 0xF +#define HIFC_AF1_DMA_ATTR_PER_FUNC_MASK 0x7 +#define HIFC_AF1_MGMT_INIT_STATUS_MASK 0x1 +#define HIFC_AF1_PF_INIT_STATUS_MASK 0x1 + +#define HIFC_AF1_GET(val, member) \ + (((val) >> HIFC_AF1_##member##_SHIFT) & HIFC_AF1_##member##_MASK) + +#define HIFC_AF4_OUTBOUND_CTRL_SHIFT 0 +#define 
HIFC_AF4_DOORBELL_CTRL_SHIFT 1 +#define HIFC_AF4_OUTBOUND_CTRL_MASK 0x1 +#define HIFC_AF4_DOORBELL_CTRL_MASK 0x1 + +#define HIFC_AF4_GET(val, member) \ + (((val) >> HIFC_AF4_##member##_SHIFT) & HIFC_AF4_##member##_MASK) + +#define HIFC_AF4_SET(val, member) \ + (((val) & HIFC_AF4_##member##_MASK) << HIFC_AF4_##member##_SHIFT) + +#define HIFC_AF4_CLEAR(val, member) \ + ((val) & (~(HIFC_AF4_##member##_MASK << \ + HIFC_AF4_##member##_SHIFT))) + +#define HIFC_AF5_PF_STATUS_SHIFT 0 +#define HIFC_AF5_PF_STATUS_MASK 0xFFFF + +#define HIFC_AF5_SET(val, member) \ + (((val) & HIFC_AF5_##member##_MASK) << HIFC_AF5_##member##_SHIFT) + +#define HIFC_AF5_GET(val, member) \ + (((val) >> HIFC_AF5_##member##_SHIFT) & HIFC_AF5_##member##_MASK) + +#define HIFC_PPF_ELECTION_IDX_SHIFT 0 +#define HIFC_PPF_ELECTION_IDX_MASK 0x1F + +#define HIFC_PPF_ELECTION_SET(val, member) \ + (((val) & HIFC_PPF_ELECTION_##member##_MASK) << \ + HIFC_PPF_ELECTION_##member##_SHIFT) + +#define HIFC_PPF_ELECTION_GET(val, member) \ + (((val) >> HIFC_PPF_ELECTION_##member##_SHIFT) & \ + HIFC_PPF_ELECTION_##member##_MASK) + +#define HIFC_PPF_ELECTION_CLEAR(val, member) \ + ((val) & (~(HIFC_PPF_ELECTION_##member##_MASK \ + << HIFC_PPF_ELECTION_##member##_SHIFT))) + +#define HIFC_MPF_ELECTION_IDX_SHIFT 0 +#define HIFC_MPF_ELECTION_IDX_MASK 0x1F + +#define HIFC_MPF_ELECTION_SET(val, member) \ + (((val) & HIFC_MPF_ELECTION_##member##_MASK) << \ + HIFC_MPF_ELECTION_##member##_SHIFT) + +#define HIFC_MPF_ELECTION_GET(val, member) \ + (((val) >> HIFC_MPF_ELECTION_##member##_SHIFT) & \ + HIFC_MPF_ELECTION_##member##_MASK) + +#define HIFC_MPF_ELECTION_CLEAR(val, member) \ + ((val) & (~(HIFC_MPF_ELECTION_##member##_MASK \ + << HIFC_MPF_ELECTION_##member##_SHIFT))) + +#define HIFC_HWIF_NUM_AEQS(hwif) ((hwif)->attr.num_aeqs) +#define HIFC_HWIF_NUM_CEQS(hwif) ((hwif)->attr.num_ceqs) +#define HIFC_HWIF_PPF_IDX(hwif) ((hwif)->attr.ppf_idx) +#define HIFC_PCI_INTF_IDX(hwif) ((hwif)->attr.pci_intf_idx) + +#define HIFC_FUNC_TYPE(dev) ((dev)->hwif->attr.func_type) +#define HIFC_IS_PPF(dev) (HIFC_FUNC_TYPE(dev) == TYPE_PPF) + +enum hifc_pcie_nosnoop { + HIFC_PCIE_SNOOP = 0, + HIFC_PCIE_NO_SNOOP = 1, +}; + +enum hifc_pcie_tph { + HIFC_PCIE_TPH_DISABLE = 0, + HIFC_PCIE_TPH_ENABLE = 1, +}; + +enum hifc_pf_status { + HIFC_PF_STATUS_INIT = 0X0, + HIFC_PF_STATUS_ACTIVE_FLAG = 0x11, + HIFC_PF_STATUS_FLR_START_FLAG = 0x12, + HIFC_PF_STATUS_FLR_FINISH_FLAG = 0x13, +}; + +enum hifc_outbound_ctrl { + ENABLE_OUTBOUND = 0x0, + DISABLE_OUTBOUND = 0x1, +}; + +enum hifc_doorbell_ctrl { + ENABLE_DOORBELL = 0x0, + DISABLE_DOORBELL = 0x1, +}; + +struct hifc_free_db_area { + u32 db_idx[HIFC_DB_MAX_AREAS]; + u32 num_free; + u32 alloc_pos; + u32 return_pos; + /* spinlock for allocating doorbell area */ + spinlock_t idx_lock; +}; + +enum func_type { + TYPE_PF, + TYPE_VF, + TYPE_PPF, + TYPE_UNKNOWN, +}; + +struct hifc_func_attr { + u16 func_global_idx; + u8 port_to_port_idx; + u8 pci_intf_idx; + u8 vf_in_pf; + enum func_type func_type; + + u8 mpf_idx; + + u8 ppf_idx; + + u16 num_irqs; /* max: 2 ^ 15 */ + u8 num_aeqs; /* max: 2 ^ 3 */ + u8 num_ceqs; /* max: 2 ^ 7 */ + + u8 num_dma_attr; /* max: 2 ^ 6 */ +}; + +struct hifc_hwif { + u8 __iomem *cfg_regs_base; + u8 __iomem *intr_regs_base; + u64 db_base_phy; + u8 __iomem *db_base; + +#if defined(__aarch64__) + void __iomem *dwqe_mapping; +#else + struct io_mapping *dwqe_mapping; +#endif + struct hifc_free_db_area free_db_area; + struct hifc_func_attr attr; + void *pdev; +}; + +struct hifc_dma_addr_align { + u32 real_size; + void 
*ori_vaddr; + dma_addr_t ori_paddr; + void *align_vaddr; + dma_addr_t align_paddr; +}; + +u32 hifc_hwif_read_reg(struct hifc_hwif *hwif, u32 reg); +void hifc_hwif_write_reg(struct hifc_hwif *hwif, u32 reg, u32 val); +void hifc_set_pf_status(struct hifc_hwif *hwif, enum hifc_pf_status status); +enum hifc_pf_status hifc_get_pf_status(struct hifc_hwif *hwif); +enum hifc_doorbell_ctrl + hifc_get_doorbell_ctrl_status(struct hifc_hwif *hwif); +enum hifc_outbound_ctrl + hifc_get_outbound_ctrl_status(struct hifc_hwif *hwif); +void hifc_enable_doorbell(struct hifc_hwif *hwif); +void hifc_disable_doorbell(struct hifc_hwif *hwif); +int hifc_init_hwif(struct hifc_hwdev *hwdev, void *cfg_reg_base, + void *intr_reg_base, u64 db_base_phy, + void *db_base, void *dwqe_mapping); +void hifc_free_hwif(struct hifc_hwdev *hwdev); +int hifc_dma_zalloc_coherent_align(void *dev_hdl, u64 size, u64 align, + unsigned flag, + struct hifc_dma_addr_align *mem_align); +void hifc_dma_free_coherent_align(void *dev_hdl, + struct hifc_dma_addr_align *mem_align); +#endif diff --git a/drivers/scsi/huawei/hifc/hifc_mgmt.c b/drivers/scsi/huawei/hifc/hifc_mgmt.c new file mode 100644 index 000000000000..3f4818898e8d --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_mgmt.c @@ -0,0 +1,1426 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/spinlock.h> +#include <linux/completion.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/semaphore.h> + +#include "hifc_knl_adp.h" +#include "hifc_hw.h" +#include "hifc_hwdev.h" +#include "hifc_hwif.h" +#include "hifc_api_cmd.h" +#include "hifc_mgmt.h" +#include "hifc_eqs.h" + +#define BUF_OUT_DEFAULT_SIZE 1 +#define SEGMENT_LEN 48 +#define MGMT_MSG_MAX_SEQ_ID (ALIGN(HIFC_MSG_TO_MGMT_MAX_LEN, \ + SEGMENT_LEN) / SEGMENT_LEN) + +#define MAX_PF_MGMT_BUF_SIZE 2048UL +#define MGMT_MSG_SIZE_MIN 20 +#define MGMT_MSG_SIZE_STEP 16 +#define MGMT_MSG_RSVD_FOR_DEV 8 +#define MGMT_MSG_TIMEOUT 5000 /* millisecond */ +#define SYNC_MSG_ID_MASK 0x1FF +#define ASYNC_MSG_ID_MASK 0x1FF +#define ASYNC_MSG_FLAG 0x200 +#define MSG_NO_RESP 0xFFFF +#define MAX_MSG_SZ 2016 + +#define MSG_SZ_IS_VALID(in_size) ((in_size) <= MAX_MSG_SZ) + +#define SYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->sync_msg_id) + +#define SYNC_MSG_ID_INC(pf_to_mgmt) (SYNC_MSG_ID(pf_to_mgmt) = \ + (SYNC_MSG_ID(pf_to_mgmt) + 1) & SYNC_MSG_ID_MASK) + +#define ASYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->async_msg_id) + +#define ASYNC_MSG_ID_INC(pf_to_mgmt) (ASYNC_MSG_ID(pf_to_mgmt) = \ + ((ASYNC_MSG_ID(pf_to_mgmt) + 1) & ASYNC_MSG_ID_MASK) \ + | ASYNC_MSG_FLAG) + +static void pf_to_mgmt_send_event_set(struct hifc_msg_pf_to_mgmt *pf_to_mgmt, + int event_flag) +{ + spin_lock(&pf_to_mgmt->sync_event_lock); + pf_to_mgmt->event_flag = event_flag; + spin_unlock(&pf_to_mgmt->sync_event_lock); +} + +/** + * hifc_register_mgmt_msg_cb - register sync msg handler for a module + * @hwdev: the pointer to hw device + * @mod: module in the chip that this handler will handle its sync messages + * @pri_handle: pri handle function + * @callback: the handler for a sync message that will handle messages + * Return: 0 - success, negative - failure + **/ +int hifc_register_mgmt_msg_cb(void *hwdev, enum hifc_mod_type mod, + void *pri_handle, hifc_mgmt_msg_cb callback) +{ + 
struct hifc_msg_pf_to_mgmt *pf_to_mgmt; + + if (mod >= HIFC_MOD_HW_MAX || !hwdev) + return -EFAULT; + + pf_to_mgmt = ((struct hifc_hwdev *)hwdev)->pf_to_mgmt; + if (!pf_to_mgmt) + return -EINVAL; + + pf_to_mgmt->recv_mgmt_msg_cb[mod] = callback; + pf_to_mgmt->recv_mgmt_msg_data[mod] = pri_handle; + + set_bit(HIFC_MGMT_MSG_CB_REG, &pf_to_mgmt->mgmt_msg_cb_state[mod]); + + return 0; +} + +/** + * hifc_unregister_mgmt_msg_cb - unregister sync msg handler for a module + * @hwdev: the pointer to hw device + * @mod: module in the chip that this handler will handle its sync messages + **/ +void hifc_unregister_mgmt_msg_cb(void *hwdev, enum hifc_mod_type mod) +{ + struct hifc_msg_pf_to_mgmt *pf_to_mgmt; + + if (!hwdev || mod >= HIFC_MOD_HW_MAX) + return; + + pf_to_mgmt = ((struct hifc_hwdev *)hwdev)->pf_to_mgmt; + if (!pf_to_mgmt) + return; + + clear_bit(HIFC_MGMT_MSG_CB_REG, &pf_to_mgmt->mgmt_msg_cb_state[mod]); + + while (test_bit(HIFC_MGMT_MSG_CB_RUNNING, + &pf_to_mgmt->mgmt_msg_cb_state[mod])) + usleep_range(900, 1000); + + pf_to_mgmt->recv_mgmt_msg_cb[mod] = NULL; + pf_to_mgmt->recv_mgmt_msg_data[mod] = NULL; +} + +void hifc_comm_recv_mgmt_self_cmd_reg(void *hwdev, u8 cmd, + comm_up_self_msg_proc proc) +{ + struct hifc_msg_pf_to_mgmt *pf_to_mgmt; + u8 cmd_idx; + + if (!hwdev || !proc) + return; + + pf_to_mgmt = ((struct hifc_hwdev *)hwdev)->pf_to_mgmt; + if (!pf_to_mgmt) + return; + + cmd_idx = pf_to_mgmt->proc.cmd_num; + if (cmd_idx >= HIFC_COMM_SELF_CMD_MAX) { + sdk_err(pf_to_mgmt->hwdev->dev_hdl, + "Register recv up process failed(cmd=0x%x)\r\n", cmd); + return; + } + + pf_to_mgmt->proc.info[cmd_idx].cmd = cmd; + pf_to_mgmt->proc.info[cmd_idx].proc = proc; + + pf_to_mgmt->proc.cmd_num++; +} + +void hifc_comm_recv_up_self_cmd_unreg(void *hwdev, u8 cmd) +{ + struct hifc_msg_pf_to_mgmt *pf_to_mgmt; + u8 cmd_idx; + + if (!hwdev) + return; + + pf_to_mgmt = ((struct hifc_hwdev *)hwdev)->pf_to_mgmt; + if (!pf_to_mgmt) + return; + + cmd_idx = pf_to_mgmt->proc.cmd_num; + if (cmd_idx >= HIFC_COMM_SELF_CMD_MAX) { + sdk_err(pf_to_mgmt->hwdev->dev_hdl, + "Unregister recv up process failed(cmd=0x%x)\r\n", cmd); + return; + } + + for (cmd_idx = 0; cmd_idx < HIFC_COMM_SELF_CMD_MAX; cmd_idx++) { + if (cmd == pf_to_mgmt->proc.info[cmd_idx].cmd) { + pf_to_mgmt->proc.info[cmd_idx].cmd = 0; + pf_to_mgmt->proc.info[cmd_idx].proc = NULL; + pf_to_mgmt->proc.cmd_num--; + } + } +} + +/** + * mgmt_msg_len - calculate the total message length + * @msg_data_len: the length of the message data + * Return: the total message length + **/ +static u16 mgmt_msg_len(u16 msg_data_len) +{ + /* u64 - the size of the header */ + u16 msg_size; + + msg_size = (u16)(MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) + msg_data_len); + + if (msg_size > MGMT_MSG_SIZE_MIN) + msg_size = MGMT_MSG_SIZE_MIN + + ALIGN((msg_size - MGMT_MSG_SIZE_MIN), + MGMT_MSG_SIZE_STEP); + else + msg_size = MGMT_MSG_SIZE_MIN; + + return msg_size; +} + +/** + * prepare_header - prepare the header of the message + * @pf_to_mgmt: PF to MGMT channel + * @header: pointer of the header to prepare + * @msg_len: the length of the message + * @mod: module in the chip that will get the message + * @ack_type: message ack type + * @direction: the direction of the original message + * @cmd: vmd type + * @msg_id: message id + **/ +static void prepare_header(struct hifc_msg_pf_to_mgmt *pf_to_mgmt, + u64 *header, u16 msg_len, enum hifc_mod_type mod, + enum hifc_msg_ack_type ack_type, + enum hifc_msg_direction_type direction, + enum hifc_mgmt_cmd cmd, u32 msg_id) +{ + struct hifc_hwif 
*hwif = pf_to_mgmt->hwdev->hwif; + + *header = HIFC_MSG_HEADER_SET(msg_len, MSG_LEN) | + HIFC_MSG_HEADER_SET(mod, MODULE) | + HIFC_MSG_HEADER_SET(msg_len, SEG_LEN) | + HIFC_MSG_HEADER_SET(ack_type, NO_ACK) | + HIFC_MSG_HEADER_SET(0, ASYNC_MGMT_TO_PF) | + HIFC_MSG_HEADER_SET(0, SEQID) | + HIFC_MSG_HEADER_SET(LAST_SEGMENT, LAST) | + HIFC_MSG_HEADER_SET(direction, DIRECTION) | + HIFC_MSG_HEADER_SET(cmd, CMD) | + HIFC_MSG_HEADER_SET(HIFC_PCI_INTF_IDX(hwif), PCI_INTF_IDX) | + HIFC_MSG_HEADER_SET(hwif->attr.port_to_port_idx, P2P_IDX) | + HIFC_MSG_HEADER_SET(msg_id, MSG_ID); +} + +static void clp_prepare_header(struct hifc_hwdev *hwdev, + u64 *header, u16 msg_len, enum hifc_mod_type mod, + enum hifc_msg_ack_type ack_type, + enum hifc_msg_direction_type direction, + enum hifc_mgmt_cmd cmd, u32 msg_id) +{ + struct hifc_hwif *hwif = hwdev->hwif; + + *header = HIFC_MSG_HEADER_SET(msg_len, MSG_LEN) | + HIFC_MSG_HEADER_SET(mod, MODULE) | + HIFC_MSG_HEADER_SET(msg_len, SEG_LEN) | + HIFC_MSG_HEADER_SET(ack_type, NO_ACK) | + HIFC_MSG_HEADER_SET(0, ASYNC_MGMT_TO_PF) | + HIFC_MSG_HEADER_SET(0, SEQID) | + HIFC_MSG_HEADER_SET(LAST_SEGMENT, LAST) | + HIFC_MSG_HEADER_SET(direction, DIRECTION) | + HIFC_MSG_HEADER_SET(cmd, CMD) | + HIFC_MSG_HEADER_SET(HIFC_PCI_INTF_IDX(hwif), PCI_INTF_IDX) | + HIFC_MSG_HEADER_SET(hwif->attr.port_to_port_idx, P2P_IDX) | + HIFC_MSG_HEADER_SET(msg_id, MSG_ID); +} + +/** + * prepare_mgmt_cmd - prepare the mgmt command + * @mgmt_cmd: pointer to the command to prepare + * @header: pointer of the header to prepare + * @msg: the data of the message + * @msg_len: the length of the message + **/ +static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, const void *msg, + int msg_len) +{ + memset(mgmt_cmd, 0, MGMT_MSG_RSVD_FOR_DEV); + + mgmt_cmd += MGMT_MSG_RSVD_FOR_DEV; + memcpy(mgmt_cmd, header, sizeof(*header)); + + mgmt_cmd += sizeof(*header); + memcpy(mgmt_cmd, msg, msg_len); +} + +/** + * send_msg_to_mgmt_async - send async message + * @pf_to_mgmt: PF to MGMT channel + * @mod: module in the chip that will get the message + * @cmd: command of the message + * @msg: the data of the message + * @msg_len: the length of the message + * @direction: the direction of the original message + * @resp_msg_id: msg id to response for + * Return: 0 - success, negative - failure + **/ +static int send_msg_to_mgmt_async(struct hifc_msg_pf_to_mgmt *pf_to_mgmt, + enum hifc_mod_type mod, u8 cmd, + void *msg, u16 msg_len, + enum hifc_msg_direction_type direction, + u16 resp_msg_id) +{ + void *mgmt_cmd = pf_to_mgmt->async_msg_buf; + struct hifc_api_cmd_chain *chain; + u64 header; + u16 cmd_size = mgmt_msg_len(msg_len); + + if (!hifc_get_chip_present_flag(pf_to_mgmt->hwdev)) + return -EFAULT; + + if (direction == HIFC_MSG_RESPONSE) + prepare_header(pf_to_mgmt, &header, msg_len, mod, HIFC_MSG_ACK, + direction, cmd, resp_msg_id); + else + prepare_header(pf_to_mgmt, &header, msg_len, mod, HIFC_MSG_ACK, + direction, cmd, ASYNC_MSG_ID(pf_to_mgmt)); + + prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len); + + chain = pf_to_mgmt->cmd_chain[HIFC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU]; + + return hifc_api_cmd_write(chain, HIFC_NODE_ID_MGMT_HOST, mgmt_cmd, + cmd_size); +} + +int hifc_pf_to_mgmt_async(void *hwdev, enum hifc_mod_type mod, + u8 cmd, void *buf_in, u16 in_size) +{ + struct hifc_msg_pf_to_mgmt *pf_to_mgmt; + void *dev = ((struct hifc_hwdev *)hwdev)->dev_hdl; + int err; + + pf_to_mgmt = ((struct hifc_hwdev *)hwdev)->pf_to_mgmt; + + /* Lock the async_msg_buf */ + spin_lock_bh(&pf_to_mgmt->async_msg_lock); + 
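+	/* The async message id and async_msg_buf are shared, so both the id
+	 * bump and the send below stay under async_msg_lock.
+	 */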
ASYNC_MSG_ID_INC(pf_to_mgmt); + + err = send_msg_to_mgmt_async(pf_to_mgmt, mod, cmd, buf_in, in_size, + HIFC_MSG_DIRECT_SEND, MSG_NO_RESP); + spin_unlock_bh(&pf_to_mgmt->async_msg_lock); + + if (err) { + sdk_err(dev, "Failed to send async mgmt msg\n"); + return err; + } + + return 0; +} + +/** + * send_msg_to_mgmt_sync - send async message + * @pf_to_mgmt: PF to MGMT channel + * @mod: module in the chip that will get the message + * @cmd: command of the message + * @msg: the msg data + * @msg_len: the msg data length + * @direction: the direction of the original message + * @resp_msg_id: msg id to response for + * Return: 0 - success, negative - failure + **/ +static int send_msg_to_mgmt_sync(struct hifc_msg_pf_to_mgmt *pf_to_mgmt, + enum hifc_mod_type mod, u8 cmd, + void *msg, u16 msg_len, + enum hifc_msg_ack_type ack_type, + enum hifc_msg_direction_type direction, + u16 resp_msg_id) +{ + void *mgmt_cmd = pf_to_mgmt->sync_msg_buf; + struct hifc_api_cmd_chain *chain; + u64 header; + u16 cmd_size = mgmt_msg_len(msg_len); + + if (!hifc_get_chip_present_flag(pf_to_mgmt->hwdev)) + return -EFAULT; + + if (direction == HIFC_MSG_RESPONSE) + prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type, + direction, cmd, resp_msg_id); + else + prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type, + direction, cmd, SYNC_MSG_ID_INC(pf_to_mgmt)); + + if (ack_type == HIFC_MSG_ACK) + pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_START); + + prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len); + + chain = pf_to_mgmt->cmd_chain[HIFC_API_CMD_WRITE_TO_MGMT_CPU]; + + return hifc_api_cmd_write(chain, HIFC_NODE_ID_MGMT_HOST, mgmt_cmd, + cmd_size); +} + +static inline void msg_to_mgmt_pre(enum hifc_mod_type mod, void *buf_in) +{ + struct hifc_msg_head *msg_head; + + /* set aeq fix num to 3, need to ensure response aeq id < 3*/ + if (mod == HIFC_MOD_COMM || mod == HIFC_MOD_L2NIC) { + msg_head = buf_in; + + if (msg_head->resp_aeq_num >= HIFC_MAX_AEQS) + msg_head->resp_aeq_num = 0; + } +} + +int hifc_pf_to_mgmt_sync(void *hwdev, enum hifc_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout) +{ + struct hifc_msg_pf_to_mgmt *pf_to_mgmt; + void *dev = ((struct hifc_hwdev *)hwdev)->dev_hdl; + struct hifc_recv_msg *recv_msg; + struct completion *recv_done; + ulong timeo; + int err; + ulong ret; + + msg_to_mgmt_pre(mod, buf_in); + + pf_to_mgmt = ((struct hifc_hwdev *)hwdev)->pf_to_mgmt; + + /* Lock the sync_msg_buf */ + down(&pf_to_mgmt->sync_msg_lock); + recv_msg = &pf_to_mgmt->recv_resp_msg_from_mgmt; + recv_done = &recv_msg->recv_done; + + init_completion(recv_done); + + err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size, + HIFC_MSG_ACK, HIFC_MSG_DIRECT_SEND, + MSG_NO_RESP); + if (err) { + sdk_err(dev, "Failed to send sync msg to mgmt, sync_msg_id: %d\n", + pf_to_mgmt->sync_msg_id); + pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_FAIL); + goto unlock_sync_msg; + } + + timeo = msecs_to_jiffies(timeout ? 
timeout : MGMT_MSG_TIMEOUT); + + ret = wait_for_completion_timeout(recv_done, timeo); + if (!ret) { + sdk_err(dev, "Mgmt response sync cmd timeout, sync_msg_id: %d\n", + pf_to_mgmt->sync_msg_id); + hifc_dump_aeq_info((struct hifc_hwdev *)hwdev); + err = -ETIMEDOUT; + pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_TIMEOUT); + goto unlock_sync_msg; + } + pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_END); + + if (!(((struct hifc_hwdev *)hwdev)->chip_present_flag)) { + up(&pf_to_mgmt->sync_msg_lock); + return -ETIMEDOUT; + } + + if (buf_out && out_size) { + if (*out_size < recv_msg->msg_len) { + sdk_err(dev, "Invalid response message length: %d for mod %d cmd %d from mgmt, should less than: %d\n", + recv_msg->msg_len, mod, cmd, *out_size); + err = -EFAULT; + goto unlock_sync_msg; + } + + if (recv_msg->msg_len) + memcpy(buf_out, recv_msg->msg, recv_msg->msg_len); + + *out_size = recv_msg->msg_len; + } + +unlock_sync_msg: + up(&pf_to_mgmt->sync_msg_lock); + + return err; +} + +static int __get_clp_reg(void *hwdev, enum clp_data_type data_type, + enum clp_reg_type reg_type, u32 *reg_addr) +{ + struct hifc_hwdev *dev = hwdev; + u32 offset; + + offset = HIFC_CLP_REG_GAP * hifc_pcie_itf_id(dev); + + switch (reg_type) { + case HIFC_CLP_BA_HOST: + *reg_addr = (data_type == HIFC_CLP_REQ_HOST) ? + HIFC_CLP_REG(REQ_SRAM_BA) : + HIFC_CLP_REG(RSP_SRAM_BA); + break; + + case HIFC_CLP_SIZE_HOST: + *reg_addr = HIFC_CLP_REG(SRAM_SIZE); + break; + + case HIFC_CLP_LEN_HOST: + *reg_addr = (data_type == HIFC_CLP_REQ_HOST) ? + HIFC_CLP_REG(REQ) : HIFC_CLP_REG(RSP); + break; + + case HIFC_CLP_START_REQ_HOST: + *reg_addr = HIFC_CLP_REG(REQ); + break; + + case HIFC_CLP_READY_RSP_HOST: + *reg_addr = HIFC_CLP_REG(RSP); + break; + + default: + *reg_addr = 0; + break; + } + if (*reg_addr == 0) + return -EINVAL; + + *reg_addr += offset; + + return 0; +} + +static inline int clp_param_valid(struct hifc_hwdev *hwdev, + enum clp_data_type data_type, + enum clp_reg_type reg_type) +{ + if (data_type == HIFC_CLP_REQ_HOST && + reg_type == HIFC_CLP_READY_RSP_HOST) + return -EINVAL; + + if (data_type == HIFC_CLP_RSP_HOST && + reg_type == HIFC_CLP_START_REQ_HOST) + return -EINVAL; + + return 0; +} + +static u32 get_clp_reg_value(struct hifc_hwdev *hwdev, + enum clp_reg_type reg_type, u32 reg_addr) +{ + u32 reg_value; + + reg_value = hifc_hwif_read_reg(hwdev->hwif, reg_addr); + + switch (reg_type) { + case HIFC_CLP_BA_HOST: + reg_value = ((reg_value >> + HIFC_CLP_OFFSET(SRAM_BASE)) & + HIFC_CLP_MASK(SRAM_BASE)); + break; + + case HIFC_CLP_SIZE_HOST: + reg_value = ((reg_value >> + HIFC_CLP_OFFSET(SRAM_SIZE)) & + HIFC_CLP_MASK(SRAM_SIZE)); + break; + + case HIFC_CLP_LEN_HOST: + reg_value = ((reg_value >> HIFC_CLP_OFFSET(LEN)) & + HIFC_CLP_MASK(LEN)); + break; + + case HIFC_CLP_START_REQ_HOST: + reg_value = ((reg_value >> HIFC_CLP_OFFSET(START)) & + HIFC_CLP_MASK(START)); + break; + + case HIFC_CLP_READY_RSP_HOST: + reg_value = ((reg_value >> HIFC_CLP_OFFSET(READY)) & + HIFC_CLP_MASK(READY)); + break; + + default: + break; + } + + return reg_value; +} + +static int hifc_read_clp_reg(struct hifc_hwdev *hwdev, + enum clp_data_type data_type, + enum clp_reg_type reg_type, u32 *read_value) +{ + u32 reg_addr; + int err; + + err = clp_param_valid(hwdev, data_type, reg_type); + if (err) + return err; + + err = __get_clp_reg(hwdev, data_type, reg_type, ®_addr); + if (err) + return err; + + *read_value = get_clp_reg_value(hwdev, reg_type, reg_addr); + + return 0; +} + +static int __check_data_type(enum clp_data_type data_type, + enum 
clp_reg_type reg_type) +{ + if (data_type == HIFC_CLP_REQ_HOST && + reg_type == HIFC_CLP_READY_RSP_HOST) + return -EINVAL; + if (data_type == HIFC_CLP_RSP_HOST && + reg_type == HIFC_CLP_START_REQ_HOST) + return -EINVAL; + + return 0; +} + +static int __check_reg_value(enum clp_reg_type reg_type, u32 value) +{ + if (reg_type == HIFC_CLP_BA_HOST && + value > HIFC_CLP_SRAM_BASE_REG_MAX) + return -EINVAL; + + if (reg_type == HIFC_CLP_SIZE_HOST && + value > HIFC_CLP_SRAM_SIZE_REG_MAX) + return -EINVAL; + + if (reg_type == HIFC_CLP_LEN_HOST && + value > HIFC_CLP_LEN_REG_MAX) + return -EINVAL; + + if ((reg_type == HIFC_CLP_START_REQ_HOST || + reg_type == HIFC_CLP_READY_RSP_HOST) && + value > HIFC_CLP_START_OR_READY_REG_MAX) + return -EINVAL; + + return 0; +} + +static void hifc_write_clp_reg(struct hifc_hwdev *hwdev, + enum clp_data_type data_type, + enum clp_reg_type reg_type, u32 value) +{ + u32 reg_addr, reg_value; + + if (__check_data_type(data_type, reg_type)) + return; + + if (__check_reg_value(reg_type, value)) + return; + + if (__get_clp_reg(hwdev, data_type, reg_type, ®_addr)) + return; + + reg_value = hifc_hwif_read_reg(hwdev->hwif, reg_addr); + + switch (reg_type) { + case HIFC_CLP_LEN_HOST: + reg_value = reg_value & + (~(HIFC_CLP_MASK(LEN) << HIFC_CLP_OFFSET(LEN))); + reg_value = reg_value | (value << HIFC_CLP_OFFSET(LEN)); + break; + + case HIFC_CLP_START_REQ_HOST: + reg_value = reg_value & + (~(HIFC_CLP_MASK(START) << + HIFC_CLP_OFFSET(START))); + reg_value = reg_value | (value << HIFC_CLP_OFFSET(START)); + break; + + case HIFC_CLP_READY_RSP_HOST: + reg_value = reg_value & + (~(HIFC_CLP_MASK(READY) << + HIFC_CLP_OFFSET(READY))); + reg_value = reg_value | (value << HIFC_CLP_OFFSET(READY)); + break; + + default: + return; + } + + hifc_hwif_write_reg(hwdev->hwif, reg_addr, reg_value); +} + +static int hifc_read_clp_data(struct hifc_hwdev *hwdev, + void *buf_out, u16 *out_size) +{ + int err; + u32 reg = HIFC_CLP_DATA(RSP); + u32 ready, delay_cnt; + u32 *ptr = (u32 *)buf_out; + u32 temp_out_size = 0; + + err = hifc_read_clp_reg(hwdev, HIFC_CLP_RSP_HOST, + HIFC_CLP_READY_RSP_HOST, &ready); + if (err) + return err; + + delay_cnt = 0; + while (ready == 0) { + usleep_range(9000, 10000); + delay_cnt++; + err = hifc_read_clp_reg(hwdev, HIFC_CLP_RSP_HOST, + HIFC_CLP_READY_RSP_HOST, &ready); + if (err || delay_cnt > HIFC_CLP_DELAY_CNT_MAX) { + sdk_err(hwdev->dev_hdl, "timeout with delay_cnt:%d\n", + delay_cnt); + return -EINVAL; + } + } + + err = hifc_read_clp_reg(hwdev, HIFC_CLP_RSP_HOST, + HIFC_CLP_LEN_HOST, &temp_out_size); + if (err) + return err; + + if (temp_out_size > HIFC_CLP_SRAM_SIZE_REG_MAX || !temp_out_size) { + sdk_err(hwdev->dev_hdl, "invalid temp_out_size:%d\n", + temp_out_size); + return -EINVAL; + } + + *out_size = (u16)(temp_out_size & 0xffff); + for (; temp_out_size > 0; temp_out_size--) { + *ptr = hifc_hwif_read_reg(hwdev->hwif, reg); + ptr++; + reg = reg + 4; + } + + hifc_write_clp_reg(hwdev, HIFC_CLP_RSP_HOST, + HIFC_CLP_READY_RSP_HOST, (u32)0x0); + hifc_write_clp_reg(hwdev, HIFC_CLP_RSP_HOST, + HIFC_CLP_LEN_HOST, (u32)0x0); + + return 0; +} + +static int hifc_write_clp_data(struct hifc_hwdev *hwdev, + void *buf_in, u16 in_size) +{ + int err; + u32 reg = HIFC_CLP_DATA(REQ); + u32 start = 1; + u32 delay_cnt = 0; + u32 *ptr = (u32 *)buf_in; + + err = hifc_read_clp_reg(hwdev, HIFC_CLP_REQ_HOST, + HIFC_CLP_START_REQ_HOST, &start); + if (err) + return err; + + while (start == 1) { + usleep_range(9000, 10000); + delay_cnt++; + err = hifc_read_clp_reg(hwdev, HIFC_CLP_REQ_HOST, + 
HIFC_CLP_START_REQ_HOST, &start); + if (err || delay_cnt > HIFC_CLP_DELAY_CNT_MAX) + return -EINVAL; + } + + hifc_write_clp_reg(hwdev, HIFC_CLP_REQ_HOST, + HIFC_CLP_LEN_HOST, in_size); + hifc_write_clp_reg(hwdev, HIFC_CLP_REQ_HOST, + HIFC_CLP_START_REQ_HOST, (u32)0x1); + + for (; in_size > 0; in_size--) { + hifc_hwif_write_reg(hwdev->hwif, reg, *ptr); + ptr++; + reg = reg + 4; + } + + return 0; +} + +static int hifc_check_clp_init_status(struct hifc_hwdev *hwdev) +{ + int err; + u32 reg_value = 0; + + err = hifc_read_clp_reg(hwdev, HIFC_CLP_REQ_HOST, + HIFC_CLP_BA_HOST, ®_value); + if (err || !reg_value) { + sdk_err(hwdev->dev_hdl, "Wrong req ba value:0x%x\n", reg_value); + return -EINVAL; + } + + err = hifc_read_clp_reg(hwdev, HIFC_CLP_RSP_HOST, + HIFC_CLP_BA_HOST, ®_value); + if (err || !reg_value) { + sdk_err(hwdev->dev_hdl, "Wrong rsp ba value:0x%x\n", reg_value); + return -EINVAL; + } + + err = hifc_read_clp_reg(hwdev, HIFC_CLP_REQ_HOST, + HIFC_CLP_SIZE_HOST, ®_value); + if (err || !reg_value) { + sdk_err(hwdev->dev_hdl, "Wrong req size\n"); + return -EINVAL; + } + + err = hifc_read_clp_reg(hwdev, HIFC_CLP_RSP_HOST, + HIFC_CLP_SIZE_HOST, ®_value); + if (err || !reg_value) { + sdk_err(hwdev->dev_hdl, "Wrong rsp size\n"); + return -EINVAL; + } + + return 0; +} + +static void hifc_clear_clp_data(struct hifc_hwdev *hwdev, + enum clp_data_type data_type) +{ + u32 reg = (data_type == HIFC_CLP_REQ_HOST) ? + HIFC_CLP_DATA(REQ) : HIFC_CLP_DATA(RSP); + u32 count = HIFC_CLP_INPUT_BUFFER_LEN_HOST / HIFC_CLP_DATA_UNIT_HOST; + + for (; count > 0; count--) { + hifc_hwif_write_reg(hwdev->hwif, reg, 0x0); + reg = reg + 4; + } +} + +int hifc_pf_clp_to_mgmt(void *hwdev, enum hifc_mod_type mod, u8 cmd, + const void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hifc_clp_pf_to_mgmt *clp_pf_to_mgmt; + struct hifc_hwdev *dev = hwdev; + u64 header; + u16 real_size; + u8 *clp_msg_buf; + int err; + + clp_pf_to_mgmt = ((struct hifc_hwdev *)hwdev)->clp_pf_to_mgmt; + clp_msg_buf = clp_pf_to_mgmt->clp_msg_buf; + + /*4 bytes alignment*/ + if (in_size % HIFC_CLP_DATA_UNIT_HOST) + real_size = (in_size + (u16)sizeof(header) + + HIFC_CLP_DATA_UNIT_HOST); + else + real_size = in_size + (u16)sizeof(header); + real_size = real_size / HIFC_CLP_DATA_UNIT_HOST; + + if (real_size > + (HIFC_CLP_INPUT_BUFFER_LEN_HOST / HIFC_CLP_DATA_UNIT_HOST)) { + sdk_err(dev->dev_hdl, "Invalid real_size:%d\n", real_size); + return -EINVAL; + } + down(&clp_pf_to_mgmt->clp_msg_lock); + + err = hifc_check_clp_init_status(dev); + if (err) { + sdk_err(dev->dev_hdl, "Check clp init status failed\n"); + up(&clp_pf_to_mgmt->clp_msg_lock); + return err; + } + + hifc_clear_clp_data(dev, HIFC_CLP_RSP_HOST); + hifc_write_clp_reg(dev, HIFC_CLP_RSP_HOST, + HIFC_CLP_READY_RSP_HOST, 0x0); + + /*Send request*/ + memset(clp_msg_buf, 0x0, HIFC_CLP_INPUT_BUFFER_LEN_HOST); + clp_prepare_header(dev, &header, in_size, mod, 0, 0, cmd, 0); + + memcpy(clp_msg_buf, &header, sizeof(header)); + clp_msg_buf += sizeof(header); + memcpy(clp_msg_buf, buf_in, in_size); + + clp_msg_buf = clp_pf_to_mgmt->clp_msg_buf; + + hifc_clear_clp_data(dev, HIFC_CLP_REQ_HOST); + err = hifc_write_clp_data(hwdev, + clp_pf_to_mgmt->clp_msg_buf, real_size); + if (err) { + sdk_err(dev->dev_hdl, "Send clp request failed\n"); + up(&clp_pf_to_mgmt->clp_msg_lock); + return -EINVAL; + } + + /*Get response*/ + clp_msg_buf = clp_pf_to_mgmt->clp_msg_buf; + memset(clp_msg_buf, 0x0, HIFC_CLP_INPUT_BUFFER_LEN_HOST); + err = hifc_read_clp_data(hwdev, clp_msg_buf, &real_size); + 
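+	/* Scrub the response area unconditionally once it has been read, so
+	 * stale data is not left behind for the next CLP exchange.
+	 */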
hifc_clear_clp_data(dev, HIFC_CLP_RSP_HOST); + if (err) { + sdk_err(dev->dev_hdl, "Read clp response failed\n"); + up(&clp_pf_to_mgmt->clp_msg_lock); + return -EINVAL; + } + + real_size = (u16)((real_size * HIFC_CLP_DATA_UNIT_HOST) & 0xffff); + if ((real_size <= sizeof(header)) || + (real_size > HIFC_CLP_INPUT_BUFFER_LEN_HOST)) { + sdk_err(dev->dev_hdl, "Invalid response size:%d", real_size); + up(&clp_pf_to_mgmt->clp_msg_lock); + return -EINVAL; + } + real_size = real_size - sizeof(header); + if (real_size != *out_size) { + sdk_err(dev->dev_hdl, "Invalid real_size:%d, out_size:%d\n", + real_size, *out_size); + up(&clp_pf_to_mgmt->clp_msg_lock); + return -EINVAL; + } + + memcpy(buf_out, (clp_msg_buf + sizeof(header)), real_size); + up(&clp_pf_to_mgmt->clp_msg_lock); + + return 0; +} + +/* This function is only used by txrx flush */ +int hifc_pf_to_mgmt_no_ack(void *hwdev, enum hifc_mod_type mod, u8 cmd, + void *buf_in, u16 in_size) +{ + struct hifc_msg_pf_to_mgmt *pf_to_mgmt; + void *dev = ((struct hifc_hwdev *)hwdev)->dev_hdl; + int err = -EINVAL; + + if (!hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_MGMT_INITED)) { + sdk_err(dev, "Mgmt module not initialized\n"); + return -EINVAL; + } + + pf_to_mgmt = ((struct hifc_hwdev *)hwdev)->pf_to_mgmt; + + if (!MSG_SZ_IS_VALID(in_size)) { + sdk_err(dev, "Mgmt msg buffer size: %d is not valid\n", + in_size); + return -EINVAL; + } + + if (!(((struct hifc_hwdev *)hwdev)->chip_present_flag)) + return -EPERM; + + /* Lock the sync_msg_buf */ + down(&pf_to_mgmt->sync_msg_lock); + + err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size, + HIFC_MSG_NO_ACK, HIFC_MSG_DIRECT_SEND, + MSG_NO_RESP); + + up(&pf_to_mgmt->sync_msg_lock); + + return err; +} + +/** + * api cmd write or read bypass defaut use poll, if want to use aeq interrupt, + * please set wb_trigger_aeqe to 1 + **/ +int hifc_api_cmd_write_nack(void *hwdev, u8 dest, void *cmd, u16 size) +{ + struct hifc_msg_pf_to_mgmt *pf_to_mgmt; + struct hifc_api_cmd_chain *chain; + + if (!hwdev || !size || !cmd) + return -EINVAL; + + if (!hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_MGMT_INITED) || + hifc_get_mgmt_channel_status(hwdev)) + return -EPERM; + + pf_to_mgmt = ((struct hifc_hwdev *)hwdev)->pf_to_mgmt; + chain = pf_to_mgmt->cmd_chain[HIFC_API_CMD_POLL_WRITE]; + + if (!(((struct hifc_hwdev *)hwdev)->chip_present_flag)) + return -EPERM; + + return hifc_api_cmd_write(chain, dest, cmd, size); +} + +int hifc_api_cmd_read_ack(void *hwdev, u8 dest, void *cmd, u16 size, void *ack, + u16 ack_size) +{ + struct hifc_msg_pf_to_mgmt *pf_to_mgmt; + struct hifc_api_cmd_chain *chain; + + if (!hwdev || !cmd || (ack_size && !ack)) + return -EINVAL; + + if (!hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_MGMT_INITED) || + hifc_get_mgmt_channel_status(hwdev)) + return -EPERM; + + pf_to_mgmt = ((struct hifc_hwdev *)hwdev)->pf_to_mgmt; + chain = pf_to_mgmt->cmd_chain[HIFC_API_CMD_POLL_READ]; + + if (!(((struct hifc_hwdev *)hwdev)->chip_present_flag)) + return -EPERM; + + return hifc_api_cmd_read(chain, dest, cmd, size, ack, ack_size); +} + +static void __send_mgmt_ack(struct hifc_msg_pf_to_mgmt *pf_to_mgmt, + enum hifc_mod_type mod, u8 cmd, void *buf_in, + u16 in_size, u16 msg_id) +{ + u16 buf_size; + + if (!in_size) + buf_size = BUF_OUT_DEFAULT_SIZE; + else + buf_size = in_size; + + spin_lock_bh(&pf_to_mgmt->async_msg_lock); + /* MGMT sent sync msg, send the response */ + send_msg_to_mgmt_async(pf_to_mgmt, mod, cmd, + buf_in, buf_size, HIFC_MSG_RESPONSE, + msg_id); + spin_unlock_bh(&pf_to_mgmt->async_msg_lock); +} + 
+/** + * mgmt_recv_msg_handler - handler for message from mgmt cpu + * @pf_to_mgmt: PF to MGMT channel + * @recv_msg: received message details + **/ +static void mgmt_recv_msg_handler(struct hifc_msg_pf_to_mgmt *pf_to_mgmt, + enum hifc_mod_type mod, u8 cmd, void *buf_in, + u16 in_size, u16 msg_id, int need_resp) +{ + void *dev = pf_to_mgmt->hwdev->dev_hdl; + void *buf_out = pf_to_mgmt->mgmt_ack_buf; + enum hifc_mod_type tmp_mod = mod; + bool ack_first = false; + u16 out_size = 0; + + memset(buf_out, 0, MAX_PF_MGMT_BUF_SIZE); + + if (mod >= HIFC_MOD_HW_MAX) { + sdk_warn(dev, "Receive illegal message from mgmt cpu, mod = %d\n", + mod); + goto resp; + } + + set_bit(HIFC_MGMT_MSG_CB_RUNNING, + &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]); + + if (!pf_to_mgmt->recv_mgmt_msg_cb[mod] || + !test_bit(HIFC_MGMT_MSG_CB_REG, + &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod])) { + sdk_warn(dev, "Receive mgmt callback is null, mod = %d\n", + mod); + clear_bit(HIFC_MGMT_MSG_CB_RUNNING, + &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]); + goto resp; + } + + ack_first = hifc_mgmt_event_ack_first(mod, cmd); + if (ack_first && need_resp) { + /* send ack to mgmt first to avoid command timeout in + * mgmt(100ms in mgmt); + * mgmt to host command don't need any response data from host, + * just need ack from host + */ + __send_mgmt_ack(pf_to_mgmt, mod, cmd, buf_out, in_size, msg_id); + } + + pf_to_mgmt->recv_mgmt_msg_cb[tmp_mod](pf_to_mgmt->hwdev, + pf_to_mgmt->recv_mgmt_msg_data[tmp_mod], + cmd, buf_in, in_size, + buf_out, &out_size); + + clear_bit(HIFC_MGMT_MSG_CB_RUNNING, + &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]); + +resp: + if (!ack_first && need_resp) + __send_mgmt_ack(pf_to_mgmt, mod, cmd, buf_out, out_size, + msg_id); +} + +/** + * mgmt_resp_msg_handler - handler for response message from mgmt cpu + * @pf_to_mgmt: PF to MGMT channel + * @recv_msg: received message details + **/ +static void mgmt_resp_msg_handler(struct hifc_msg_pf_to_mgmt *pf_to_mgmt, + struct hifc_recv_msg *recv_msg) +{ + void *dev = pf_to_mgmt->hwdev->dev_hdl; + + /* delete async msg */ + if (recv_msg->msg_id & ASYNC_MSG_FLAG) + return; + + spin_lock(&pf_to_mgmt->sync_event_lock); + if (recv_msg->msg_id == pf_to_mgmt->sync_msg_id && + pf_to_mgmt->event_flag == SEND_EVENT_START) { + complete(&recv_msg->recv_done); + } else if (recv_msg->msg_id != pf_to_mgmt->sync_msg_id) { + sdk_err(dev, "Send msg id(0x%x) recv msg id(0x%x) dismatch, event state=%d\n", + pf_to_mgmt->sync_msg_id, recv_msg->msg_id, + pf_to_mgmt->event_flag); + } else { + sdk_err(dev, "Wait timeout, send msg id(0x%x) recv msg id(0x%x), event state=%d!\n", + pf_to_mgmt->sync_msg_id, recv_msg->msg_id, + pf_to_mgmt->event_flag); + } + spin_unlock(&pf_to_mgmt->sync_event_lock); +} + +static void recv_mgmt_msg_work_handler(struct work_struct *work) +{ + struct hifc_mgmt_msg_handle_work *mgmt_work = + container_of(work, struct hifc_mgmt_msg_handle_work, work); + + mgmt_recv_msg_handler(mgmt_work->pf_to_mgmt, mgmt_work->mod, + mgmt_work->cmd, mgmt_work->msg, + mgmt_work->msg_len, mgmt_work->msg_id, + !mgmt_work->async_mgmt_to_pf); + + kfree(mgmt_work->msg); + kfree(mgmt_work); +} + +static bool check_mgmt_seq_id_and_seg_len(struct hifc_recv_msg *recv_msg, + u8 seq_id, u8 seg_len) +{ + if (seq_id > MGMT_MSG_MAX_SEQ_ID || seg_len > SEGMENT_LEN) + return false; + + if (seq_id == 0) { + recv_msg->seq_id = seq_id; + } else { + if (seq_id != recv_msg->seq_id + 1) + return false; + recv_msg->seq_id = seq_id; + } + + return true; +} + +/** + * recv_mgmt_msg_handler - handler a message from mgmt cpu + * 
@pf_to_mgmt: PF to MGMT channel + * @header: the header of the message + * @recv_msg: received message details + **/ +static void recv_mgmt_msg_handler(struct hifc_msg_pf_to_mgmt *pf_to_mgmt, + u8 *header, struct hifc_recv_msg *recv_msg) +{ + struct hifc_mgmt_msg_handle_work *mgmt_work; + u64 mbox_header = *((u64 *)header); + void *msg_body = header + sizeof(mbox_header); + u8 seq_id, seq_len; + u32 offset; + u64 dir; + + /* Don't need to get anything from hw when cmd is async */ + dir = HIFC_MSG_HEADER_GET(mbox_header, DIRECTION); + if (dir == HIFC_MSG_RESPONSE && + HIFC_MSG_HEADER_GET(mbox_header, MSG_ID) & ASYNC_MSG_FLAG) + return; + + seq_len = HIFC_MSG_HEADER_GET(mbox_header, SEG_LEN); + seq_id = HIFC_MSG_HEADER_GET(mbox_header, SEQID); + + if (!check_mgmt_seq_id_and_seg_len(recv_msg, seq_id, seq_len)) { + sdk_err(pf_to_mgmt->hwdev->dev_hdl, + "Mgmt msg sequence id and segment length check fail, front seq_id: 0x%x, current seq_id: 0x%x, seg len: 0x%x\n", + recv_msg->seq_id, seq_id, seq_len); + /* set seq_id to invalid seq_id */ + recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID; + return; + } + + offset = seq_id * SEGMENT_LEN; + memcpy((u8 *)recv_msg->msg + offset, msg_body, seq_len); + + if (!HIFC_MSG_HEADER_GET(mbox_header, LAST)) + return; + + recv_msg->cmd = HIFC_MSG_HEADER_GET(mbox_header, CMD); + recv_msg->mod = HIFC_MSG_HEADER_GET(mbox_header, MODULE); + recv_msg->async_mgmt_to_pf = HIFC_MSG_HEADER_GET(mbox_header, + ASYNC_MGMT_TO_PF); + recv_msg->msg_len = HIFC_MSG_HEADER_GET(mbox_header, MSG_LEN); + recv_msg->msg_id = HIFC_MSG_HEADER_GET(mbox_header, MSG_ID); + recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID; + + if (HIFC_MSG_HEADER_GET(mbox_header, DIRECTION) == + HIFC_MSG_RESPONSE) { + mgmt_resp_msg_handler(pf_to_mgmt, recv_msg); + return; + } + + mgmt_work = kzalloc(sizeof(*mgmt_work), GFP_KERNEL); + if (!mgmt_work) { + sdk_err(pf_to_mgmt->hwdev->dev_hdl, + "Allocate mgmt work memory failed\n"); + return; + } + + if (recv_msg->msg_len) { + mgmt_work->msg = kzalloc(recv_msg->msg_len, GFP_KERNEL); + if (!mgmt_work->msg) { + sdk_err(pf_to_mgmt->hwdev->dev_hdl, "Allocate mgmt msg memory failed\n"); + kfree(mgmt_work); + return; + } + } + + mgmt_work->pf_to_mgmt = pf_to_mgmt; + mgmt_work->msg_len = recv_msg->msg_len; + memcpy(mgmt_work->msg, recv_msg->msg, recv_msg->msg_len); + mgmt_work->msg_id = recv_msg->msg_id; + mgmt_work->mod = recv_msg->mod; + mgmt_work->cmd = recv_msg->cmd; + mgmt_work->async_mgmt_to_pf = recv_msg->async_mgmt_to_pf; + + INIT_WORK(&mgmt_work->work, recv_mgmt_msg_work_handler); + queue_work(pf_to_mgmt->workq, &mgmt_work->work); +} + +/** + * hifc_mgmt_msg_aeqe_handler - handler for a mgmt message event + * @hwdev: the pointer to hw device + * @header: the header of the message + * @size: unused + **/ +void hifc_mgmt_msg_aeqe_handler(void *hwdev, u8 *header, u8 size) +{ + struct hifc_hwdev *dev = (struct hifc_hwdev *)hwdev; + struct hifc_msg_pf_to_mgmt *pf_to_mgmt; + struct hifc_recv_msg *recv_msg; + bool is_send_dir = false; + + pf_to_mgmt = dev->pf_to_mgmt; + + is_send_dir = (HIFC_MSG_HEADER_GET(*(u64 *)header, DIRECTION) == + HIFC_MSG_DIRECT_SEND) ? true : false; + + recv_msg = is_send_dir ? 
&pf_to_mgmt->recv_msg_from_mgmt : + &pf_to_mgmt->recv_resp_msg_from_mgmt; + + recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg); +} + +/** + * alloc_recv_msg - allocate received message memory + * @recv_msg: pointer that will hold the allocated data + * Return: 0 - success, negative - failure + **/ +static int alloc_recv_msg(struct hifc_recv_msg *recv_msg) +{ + recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID; + + recv_msg->msg = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); + if (!recv_msg->msg) + return -ENOMEM; + + return 0; +} + +/** + * free_recv_msg - free received message memory + * @recv_msg: pointer that holds the allocated data + **/ +static void free_recv_msg(struct hifc_recv_msg *recv_msg) +{ + kfree(recv_msg->msg); +} + +/** + * alloc_msg_buf - allocate all the message buffers of PF to MGMT channel + * @pf_to_mgmt: PF to MGMT channel + * Return: 0 - success, negative - failure + **/ +static int alloc_msg_buf(struct hifc_msg_pf_to_mgmt *pf_to_mgmt) +{ + int err; + void *dev = pf_to_mgmt->hwdev->dev_hdl; + + err = alloc_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt); + if (err) { + sdk_err(dev, "Failed to allocate recv msg\n"); + return err; + } + + err = alloc_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt); + if (err) { + sdk_err(dev, "Failed to allocate resp recv msg\n"); + goto alloc_msg_for_resp_err; + } + + pf_to_mgmt->async_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); + if (!pf_to_mgmt->async_msg_buf) { + err = -ENOMEM; + goto async_msg_buf_err; + } + + pf_to_mgmt->sync_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); + if (!pf_to_mgmt->sync_msg_buf) { + err = -ENOMEM; + goto sync_msg_buf_err; + } + + pf_to_mgmt->mgmt_ack_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); + if (!pf_to_mgmt->mgmt_ack_buf) { + err = -ENOMEM; + goto ack_msg_buf_err; + } + + return 0; + +ack_msg_buf_err: + kfree(pf_to_mgmt->sync_msg_buf); + +sync_msg_buf_err: + kfree(pf_to_mgmt->async_msg_buf); + +async_msg_buf_err: + free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt); + +alloc_msg_for_resp_err: + free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt); + return err; +} + +/** + * free_msg_buf - free all the message buffers of PF to MGMT channel + * @pf_to_mgmt: PF to MGMT channel + **/ +static void free_msg_buf(struct hifc_msg_pf_to_mgmt *pf_to_mgmt) +{ + kfree(pf_to_mgmt->mgmt_ack_buf); + kfree(pf_to_mgmt->sync_msg_buf); + kfree(pf_to_mgmt->async_msg_buf); + + free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt); + free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt); +} + +/** + * hifc_pf_to_mgmt_init - initialize PF to MGMT channel + * @hwdev: the pointer to hw device + * Return: 0 - success, negative - failure + **/ +int hifc_pf_to_mgmt_init(struct hifc_hwdev *hwdev) +{ + struct hifc_msg_pf_to_mgmt *pf_to_mgmt; + void *dev = hwdev->dev_hdl; + int err; + + pf_to_mgmt = kzalloc(sizeof(*pf_to_mgmt), GFP_KERNEL); + if (!pf_to_mgmt) + return -ENOMEM; + + hwdev->pf_to_mgmt = pf_to_mgmt; + pf_to_mgmt->hwdev = hwdev; + spin_lock_init(&pf_to_mgmt->async_msg_lock); + spin_lock_init(&pf_to_mgmt->sync_event_lock); + sema_init(&pf_to_mgmt->sync_msg_lock, 1); + pf_to_mgmt->workq = create_singlethread_workqueue(HIFC_MGMT_WQ_NAME); + if (!pf_to_mgmt->workq) { + sdk_err(dev, "Failed to initialize MGMT workqueue\n"); + err = -ENOMEM; + goto create_mgmt_workq_err; + } + + err = alloc_msg_buf(pf_to_mgmt); + if (err) { + sdk_err(dev, "Failed to allocate msg buffers\n"); + goto alloc_msg_buf_err; + } + + err = hifc_api_cmd_init(hwdev, pf_to_mgmt->cmd_chain); + if (err) { + sdk_err(dev, "Failed to init the api cmd chains\n"); + 
goto api_cmd_init_err; + } + + return 0; + +api_cmd_init_err: + free_msg_buf(pf_to_mgmt); + +alloc_msg_buf_err: + destroy_workqueue(pf_to_mgmt->workq); + +create_mgmt_workq_err: + kfree(pf_to_mgmt); + + return err; +} + +/** + * hifc_pf_to_mgmt_free - free PF to MGMT channel + * @hwdev: the pointer to hw device + **/ +void hifc_pf_to_mgmt_free(struct hifc_hwdev *hwdev) +{ + struct hifc_msg_pf_to_mgmt *pf_to_mgmt = hwdev->pf_to_mgmt; + + /* destroy workqueue before free related pf_to_mgmt resources in case of + * illegal resource access + */ + destroy_workqueue(pf_to_mgmt->workq); + hifc_api_cmd_free(pf_to_mgmt->cmd_chain); + free_msg_buf(pf_to_mgmt); + kfree(pf_to_mgmt); +} + +void hifc_flush_mgmt_workq(void *hwdev) +{ + struct hifc_hwdev *dev = (struct hifc_hwdev *)hwdev; + + flush_workqueue(dev->aeqs->workq); + + if (hifc_func_type(dev) != TYPE_VF && + hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_MGMT_INITED)) + flush_workqueue(dev->pf_to_mgmt->workq); +} + +int hifc_clp_pf_to_mgmt_init(struct hifc_hwdev *hwdev) +{ + struct hifc_clp_pf_to_mgmt *clp_pf_to_mgmt; + + clp_pf_to_mgmt = kzalloc(sizeof(*clp_pf_to_mgmt), GFP_KERNEL); + if (!clp_pf_to_mgmt) + return -ENOMEM; + + clp_pf_to_mgmt->clp_msg_buf = kzalloc(HIFC_CLP_INPUT_BUFFER_LEN_HOST, + GFP_KERNEL); + if (!clp_pf_to_mgmt->clp_msg_buf) { + kfree(clp_pf_to_mgmt); + return -ENOMEM; + } + sema_init(&clp_pf_to_mgmt->clp_msg_lock, 1); + + hwdev->clp_pf_to_mgmt = clp_pf_to_mgmt; + + return 0; +} + +void hifc_clp_pf_to_mgmt_free(struct hifc_hwdev *hwdev) +{ + struct hifc_clp_pf_to_mgmt *clp_pf_to_mgmt = hwdev->clp_pf_to_mgmt; + + kfree(clp_pf_to_mgmt->clp_msg_buf); + kfree(clp_pf_to_mgmt); +} + diff --git a/drivers/scsi/huawei/hifc/hifc_mgmt.h b/drivers/scsi/huawei/hifc/hifc_mgmt.h new file mode 100644 index 000000000000..2adcfe2968c1 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_mgmt.h @@ -0,0 +1,407 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef HIFC_MGMT_H_ +#define HIFC_MGMT_H_ + +#define HIFC_MSG_HEADER_MSG_LEN_SHIFT 0 +#define HIFC_MSG_HEADER_MODULE_SHIFT 11 +#define HIFC_MSG_HEADER_SEG_LEN_SHIFT 16 +#define HIFC_MSG_HEADER_NO_ACK_SHIFT 22 +#define HIFC_MSG_HEADER_ASYNC_MGMT_TO_PF_SHIFT 23 +#define HIFC_MSG_HEADER_SEQID_SHIFT 24 +#define HIFC_MSG_HEADER_LAST_SHIFT 30 +#define HIFC_MSG_HEADER_DIRECTION_SHIFT 31 +#define HIFC_MSG_HEADER_CMD_SHIFT 32 +#define HIFC_MSG_HEADER_PCI_INTF_IDX_SHIFT 48 +#define HIFC_MSG_HEADER_P2P_IDX_SHIFT 50 +#define HIFC_MSG_HEADER_MSG_ID_SHIFT 54 + +#define HIFC_MSG_HEADER_MSG_LEN_MASK 0x7FF +#define HIFC_MSG_HEADER_MODULE_MASK 0x1F +#define HIFC_MSG_HEADER_SEG_LEN_MASK 0x3F +#define HIFC_MSG_HEADER_NO_ACK_MASK 0x1 +#define HIFC_MSG_HEADER_ASYNC_MGMT_TO_PF_MASK 0x1 +#define HIFC_MSG_HEADER_SEQID_MASK 0x3F +#define HIFC_MSG_HEADER_LAST_MASK 0x1 +#define HIFC_MSG_HEADER_DIRECTION_MASK 0x1 +#define HIFC_MSG_HEADER_CMD_MASK 0xFF +#define HIFC_MSG_HEADER_PCI_INTF_IDX_MASK 0x3 +#define HIFC_MSG_HEADER_P2P_IDX_MASK 0xF +#define HIFC_MSG_HEADER_MSG_ID_MASK 0x3FF + +#define HIFC_MSG_HEADER_GET(val, member) \ + (((val) >> HIFC_MSG_HEADER_##member##_SHIFT) & \ + HIFC_MSG_HEADER_##member##_MASK) + +#define HIFC_MSG_HEADER_SET(val, member) \ + ((u64)((val) & HIFC_MSG_HEADER_##member##_MASK) << \ + HIFC_MSG_HEADER_##member##_SHIFT) + +#define HIFC_MGMT_WQ_NAME "hifc_mgmt" + +/*CLP*/ +enum clp_data_type { + HIFC_CLP_REQ_HOST = 0, + HIFC_CLP_RSP_HOST = 1 +}; + +enum clp_reg_type { + HIFC_CLP_BA_HOST = 0, + 
HIFC_CLP_SIZE_HOST = 1, + HIFC_CLP_LEN_HOST = 2, + HIFC_CLP_START_REQ_HOST = 3, + HIFC_CLP_READY_RSP_HOST = 4 +}; + +/* cmd of mgmt CPU message for HW module */ +enum hifc_mgmt_cmd { + HIFC_MGMT_CMD_RESET_MGMT = 0x0, + HIFC_MGMT_CMD_START_FLR = 0x1, + HIFC_MGMT_CMD_FLUSH_DOORBELL = 0x2, + HIFC_MGMT_CMD_CMDQ_CTXT_SET = 0x10, + HIFC_MGMT_CMD_VAT_SET = 0x12, + HIFC_MGMT_CMD_L2NIC_SQ_CI_ATTR_SET = 0x14, + HIFC_MGMT_CMD_PPF_TMR_SET = 0x22, + HIFC_MGMT_CMD_PPF_HT_GPA_SET = 0x23, + HIFC_MGMT_CMD_RES_STATE_SET = 0x24, + HIFC_MGMT_CMD_FUNC_TMR_BITMAT_SET = 0x32, + HIFC_MGMT_CMD_CEQ_CTRL_REG_WR_BY_UP = 0x33, + HIFC_MGMT_CMD_MSI_CTRL_REG_WR_BY_UP, + HIFC_MGMT_CMD_MSI_CTRL_REG_RD_BY_UP, + HIFC_MGMT_CMD_FAULT_REPORT = 0x37, + HIFC_MGMT_CMD_HEART_LOST_REPORT = 0x38, + HIFC_MGMT_CMD_SYNC_TIME = 0x46, + HIFC_MGMT_CMD_REG_READ = 0x48, + HIFC_MGMT_CMD_L2NIC_RESET = 0x4b, + HIFC_MGMT_CMD_ACTIVATE_FW = 0x4F, + HIFC_MGMT_CMD_PAGESIZE_SET = 0x50, + HIFC_MGMT_CMD_GET_BOARD_INFO = 0x52, + HIFC_MGMT_CMD_WATCHDOG_INFO = 0x56, + HIFC_MGMT_CMD_FMW_ACT_NTC = 0x57, + HIFC_MGMT_CMD_PCIE_DFX_NTC = 0x65, + HIFC_MGMT_CMD_PCIE_DFX_GET = 0x66, + HIFC_MGMT_CMD_GET_HOST_INFO = 0x67, + HIFC_MGMT_CMD_GET_PHY_INIT_STATUS = 0x6A, + HIFC_MGMT_CMD_HEARTBEAT_EVENT = 0x6C, +}; + +#define HIFC_CLP_REG_GAP 0x20 +#define HIFC_CLP_INPUT_BUFFER_LEN_HOST 2048UL +#define HIFC_CLP_OUTPUT_BUFFER_LEN_HOST 2048UL +#define HIFC_CLP_DATA_UNIT_HOST 4UL +#define HIFC_BAR01_GLOABAL_CTL_OFFSET 0x4000 +#define HIFC_BAR01_CLP_OFFSET 0x5000 + +#define HIFC_CLP_SRAM_SIZE_REG (HIFC_BAR01_GLOABAL_CTL_OFFSET + 0x220) +#define HIFC_CLP_REQ_SRAM_BA_REG (HIFC_BAR01_GLOABAL_CTL_OFFSET + 0x224) +#define HIFC_CLP_RSP_SRAM_BA_REG (HIFC_BAR01_GLOABAL_CTL_OFFSET + 0x228) +#define HIFC_CLP_REQ_REG (HIFC_BAR01_GLOABAL_CTL_OFFSET + 0x22c) +#define HIFC_CLP_RSP_REG (HIFC_BAR01_GLOABAL_CTL_OFFSET + 0x230) +#define HIFC_CLP_REG(member) (HIFC_CLP_##member##_REG) + +#define HIFC_CLP_REQ_DATA (HIFC_BAR01_CLP_OFFSET) +#define HIFC_CLP_RSP_DATA (HIFC_BAR01_CLP_OFFSET + 0x1000) +#define HIFC_CLP_DATA(member) (HIFC_CLP_##member##_DATA) + +#define HIFC_CLP_SRAM_SIZE_OFFSET 16 +#define HIFC_CLP_SRAM_BASE_OFFSET 0 +#define HIFC_CLP_LEN_OFFSET 0 +#define HIFC_CLP_START_OFFSET 31 +#define HIFC_CLP_READY_OFFSET 31 +#define HIFC_CLP_OFFSET(member) (HIFC_CLP_##member##_OFFSET) + +#define HIFC_CLP_SRAM_SIZE_BIT_LEN 0x7ffUL +#define HIFC_CLP_SRAM_BASE_BIT_LEN 0x7ffffffUL +#define HIFC_CLP_LEN_BIT_LEN 0x7ffUL +#define HIFC_CLP_START_BIT_LEN 0x1UL +#define HIFC_CLP_READY_BIT_LEN 0x1UL +#define HIFC_CLP_MASK(member) (HIFC_CLP_##member##_BIT_LEN) + +#define HIFC_CLP_DELAY_CNT_MAX 200UL +#define HIFC_CLP_SRAM_SIZE_REG_MAX 0x3ff +#define HIFC_CLP_SRAM_BASE_REG_MAX 0x7ffffff +#define HIFC_CLP_LEN_REG_MAX 0x3ff +#define HIFC_CLP_START_OR_READY_REG_MAX 0x1 +#define HIFC_MGMT_CMD_UNSUPPORTED 0xFF + +enum hifc_msg_direction_type { + HIFC_MSG_DIRECT_SEND = 0, + HIFC_MSG_RESPONSE = 1 +}; + +enum hifc_msg_segment_type { + NOT_LAST_SEGMENT = 0, + LAST_SEGMENT = 1, +}; + +enum hifc_mgmt_msg_type { + ASYNC_MGMT_MSG = 0, + SYNC_MGMT_MSG = 1, +}; + +enum hifc_msg_ack_type { + HIFC_MSG_ACK = 0, + HIFC_MSG_NO_ACK = 1, +}; + +struct hifc_recv_msg { + void *msg; + + struct completion recv_done; + + u16 msg_len; + enum hifc_mod_type mod; + u8 cmd; + u8 seq_id; + u16 msg_id; + int async_mgmt_to_pf; +}; + +struct hifc_msg_head { + u8 status; + u8 version; + u8 resp_aeq_num; + u8 rsvd0[5]; +}; + +#define HIFC_COMM_SELF_CMD_MAX 8 + +struct comm_up_self_msg_sub_info { + u8 cmd; + comm_up_self_msg_proc proc; +}; + 
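The HIFC_MSG_HEADER_*_SHIFT/_MASK pairs at the top of this header are only ever used through the GET/SET macros. A short sketch of composing and decoding a 64-bit management message header with them (illustrative only, not part of the patch; example_build_header() is a hypothetical helper):

static u64 example_build_header(u16 msg_len, u8 mod, u8 cmd, u16 msg_id)
{
	/* single-segment, direct-send message that expects an ack */
	return HIFC_MSG_HEADER_SET(msg_len, MSG_LEN) |
	       HIFC_MSG_HEADER_SET(mod, MODULE) |
	       HIFC_MSG_HEADER_SET(HIFC_MSG_ACK, NO_ACK) |
	       HIFC_MSG_HEADER_SET(LAST_SEGMENT, LAST) |
	       HIFC_MSG_HEADER_SET(HIFC_MSG_DIRECT_SEND, DIRECTION) |
	       HIFC_MSG_HEADER_SET(cmd, CMD) |
	       HIFC_MSG_HEADER_SET(msg_id, MSG_ID);
}

Decoding uses the matching GET macro, e.g. HIFC_MSG_HEADER_GET(header, CMD).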
+struct comm_up_self_msg_info { + u8 cmd_num; + struct comm_up_self_msg_sub_info info[HIFC_COMM_SELF_CMD_MAX]; +}; + +enum comm_pf_to_mgmt_event_state { + SEND_EVENT_UNINIT = 0, + SEND_EVENT_START, + SEND_EVENT_FAIL, + SEND_EVENT_TIMEOUT, + SEND_EVENT_END, +}; + +enum hifc_mgmt_msg_cb_state { + HIFC_MGMT_MSG_CB_REG = 0, + HIFC_MGMT_MSG_CB_RUNNING, +}; + +struct hifc_clp_pf_to_mgmt { + struct semaphore clp_msg_lock; + void *clp_msg_buf; +}; + +struct hifc_msg_pf_to_mgmt { + struct hifc_hwdev *hwdev; + + /* Async cmd can not be scheduling */ + spinlock_t async_msg_lock; + struct semaphore sync_msg_lock; + + struct workqueue_struct *workq; + + void *async_msg_buf; + void *sync_msg_buf; + void *mgmt_ack_buf; + + struct hifc_recv_msg recv_msg_from_mgmt; + struct hifc_recv_msg recv_resp_msg_from_mgmt; + + u16 async_msg_id; + u16 sync_msg_id; + + struct hifc_api_cmd_chain *cmd_chain[HIFC_API_CMD_MAX]; + + hifc_mgmt_msg_cb recv_mgmt_msg_cb[HIFC_MOD_HW_MAX]; + void *recv_mgmt_msg_data[HIFC_MOD_HW_MAX]; + unsigned long mgmt_msg_cb_state[HIFC_MOD_HW_MAX]; + + struct comm_up_self_msg_info proc; + + /* lock when sending msg */ + spinlock_t sync_event_lock; + enum comm_pf_to_mgmt_event_state event_flag; +}; + +struct hifc_mgmt_msg_handle_work { + struct work_struct work; + struct hifc_msg_pf_to_mgmt *pf_to_mgmt; + void *msg; + u16 msg_len; + enum hifc_mod_type mod; + u8 cmd; + u16 msg_id; + int async_mgmt_to_pf; +}; + +/* show each drivers only such as nic_service_cap, + * toe_service_cap structure, but not show service_cap + */ +enum hifc_service_type { + SERVICE_T_NIC = 0, + + SERVICE_T_FC = 5, + + SERVICE_T_MAX, + + /* Only used for interruption resource management, + * mark the request module + */ + SERVICE_T_INTF = (1 << 15), + SERVICE_T_CQM = (1 << 16), +}; + +/* NIC service capability + * 1, The chip supports NIC RQ is 1K + * 2, PF/VF RQ specifications: + * disable RSS: + * disable VMDq: Each PF/VF at most 8 RQ + * enable the VMDq: Each PF/VF at most 1K RQ + * enable the RSS: + * disable VMDq: each PF at most 64 RQ, VF at most 32 RQ + * enable the VMDq: Each PF/VF at most 1K RQ + * + * 3, The chip supports NIC SQ is 1K + * 4, PF/VF SQ specifications: + * disable RSS: + * disable VMDq: Each PF/VF at most 8 SQ + * enable the VMDq: Each PF/VF at most 1K SQ + * enable the RSS: + * disable VMDq: each PF at most 64 SQ, VF at most 32 SQ + * enable the VMDq: Each PF/VF at most 1K SQ + */ +struct nic_service_cap { + /* PF resources*/ + u16 max_sqs; + u16 max_rqs; + + /* VF resources, vf obtain through the MailBox mechanism from + * according PF + */ + u16 vf_max_sqs; + u16 vf_max_rqs; + bool lro_en; /* LRO feature enable bit*/ + u8 lro_sz; /* LRO context space: n*16B */ + u8 tso_sz; /* TSO context space: n*16B */ + + u16 max_queue_allowed; +}; + +/* PF FC service resource structure defined*/ +struct dev_fc_svc_cap { + /* PF Parent QPC */ + u32 max_parent_qpc_num; /* max number is 2048*/ + + /* PF Child QPC */ + u32 max_child_qpc_num; /* max number is 2048*/ + + /* PF SCQ */ + u32 scq_num; /* 16 */ + + /* PF supports SRQ*/ + u32 srq_num; /* Number of SRQ is 2*/ + + u8 vp_id_start; + u8 vp_id_end; +}; + +/* FC services*/ +struct fc_service_cap { + struct dev_fc_svc_cap dev_fc_cap; + + /* Parent QPC */ + u32 parent_qpc_size; /* 256B */ + + /* Child QPC */ + u32 child_qpc_size; /* 256B */ + + /* SQ */ + u32 sqe_size; /* 128B(in linked list mode)*/ + + /* SCQ */ + u32 scqc_size; /* Size of the Context 32B*/ + u32 scqe_size; /* 64B */ + + /* SRQ */ + u32 srqc_size; /* Size of SRQ Context (64B)*/ + u32 
srqe_size; /* 32B */ +}; + +bool hifc_support_fc(void *hwdev, struct fc_service_cap *cap); + +/* Service interface for obtaining service_cap public fields*/ +/* Obtain service_cap.host_oq_id_mask_val*/ +u8 hifc_host_oq_id_mask(void *hwdev); + +/* Obtain service_cap.dev_cap.max_sqs*/ +u16 hifc_func_max_qnum(void *hwdev); + +/* The following information is obtained from the bar space + * which is recorded by SDK layer. + * Here provide parameter query interface for service + */ +/* func_attr.glb_func_idx, global function index */ +u16 hifc_global_func_id(void *hwdev); +/* func_attr.intr_num, MSI-X table entry in function*/ +enum intr_type { + INTR_TYPE_MSIX, + INTR_TYPE_MSI, + INTR_TYPE_INT, + INTR_TYPE_NONE, +}; + +u8 hifc_pcie_itf_id(void *hwdev); /* func_attr.itf_idx, pcie interface index */ + +/* func_attr.func_type, 0-PF 1-VF 2-PPF */ +enum func_type hifc_func_type(void *hwdev); + +u8 hifc_ppf_idx(void *hwdev); + +enum hifc_msix_state { + HIFC_MSIX_ENABLE, + HIFC_MSIX_DISABLE, +}; + +void hifc_set_msix_state(void *hwdev, u16 msix_idx, + enum hifc_msix_state flag); + +/* Defines the IRQ information structure*/ +struct irq_info { + u16 msix_entry_idx; /* IRQ corresponding index number */ + u32 irq_id; /* the IRQ number from OS */ +}; + +int hifc_alloc_irqs(void *hwdev, enum hifc_service_type type, u16 req_num, + struct irq_info *irq_info_array, u16 *resp_num); +void hifc_free_irq(void *hwdev, enum hifc_service_type type, u32 irq_id); + +int hifc_sync_time(void *hwdev, u64 time); +void hifc_disable_mgmt_msg_report(void *hwdev); +void hifc_set_func_deinit_flag(void *hwdev); +void hifc_flush_mgmt_workq(void *hwdev); +int hifc_global_func_id_get(void *hwdev, u16 *func_id); +u16 hifc_global_func_id_hw(void *hwdev); +int hifc_pf_to_mgmt_no_ack(void *hwdev, enum hifc_mod_type mod, u8 cmd, + void *buf_in, u16 in_size); +void hifc_mgmt_msg_aeqe_handler(void *handle, u8 *header, u8 size); +int hifc_pf_to_mgmt_init(struct hifc_hwdev *hwdev); +void hifc_pf_to_mgmt_free(struct hifc_hwdev *hwdev); +int hifc_pf_to_mgmt_sync(void *hwdev, enum hifc_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout); +int hifc_pf_to_mgmt_async(void *hwdev, enum hifc_mod_type mod, u8 cmd, + void *buf_in, u16 in_size); +int hifc_pf_clp_to_mgmt(void *hwdev, enum hifc_mod_type mod, u8 cmd, + const void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); +int hifc_clp_pf_to_mgmt_init(struct hifc_hwdev *hwdev); +void hifc_clp_pf_to_mgmt_free(struct hifc_hwdev *hwdev); + +#endif diff --git a/drivers/scsi/huawei/hifc/hifc_sml.c b/drivers/scsi/huawei/hifc/hifc_sml.c new file mode 100644 index 000000000000..2d04ff6ed5ff --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_sml.c @@ -0,0 +1,361 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/types.h> +#include "hifc_knl_adp.h" +#include "hifc_hw.h" +#include "hifc_hwdev.h" +#include "hifc_sml.h" + +#ifndef HTONL +#define HTONL(x) \ + ((((x) & 0x000000ff) << 24) \ + | (((x) & 0x0000ff00) << 8) \ + | (((x) & 0x00ff0000) >> 8) \ + | (((x) & 0xff000000) >> 24)) +#endif + +static void sml_ctr_htonl_n(u32 *node, u32 len) +{ + u32 i; + + for (i = 0; i < len; i++) { + *node = HTONL(*node); + node++; + } +} + +static void hifc_sml_ctr_read_build_req(struct chipif_sml_ctr_rd_req_s *msg, + u8 instance_id, u8 op_id, + u8 ack, u32 ctr_id, u32 init_val) +{ + msg->head.value = 0; + 
msg->head.bs.instance = instance_id; + msg->head.bs.op_id = op_id; + msg->head.bs.ack = ack; + msg->head.value = HTONL(msg->head.value); + + msg->ctr_id = ctr_id; + msg->ctr_id = HTONL(msg->ctr_id); + + msg->initial = init_val; +} + +static void hifc_sml_ctr_write_build_req(struct chipif_sml_ctr_wr_req_s *msg, + u8 instance_id, u8 op_id, + u8 ack, u32 ctr_id, + u64 val1, u64 val2) +{ + msg->head.value = 0; + msg->head.bs.instance = instance_id; + msg->head.bs.op_id = op_id; + msg->head.bs.ack = ack; + msg->head.value = HTONL(msg->head.value); + + msg->ctr_id = ctr_id; + msg->ctr_id = HTONL(msg->ctr_id); + + msg->value1_h = val1 >> 32; + msg->value1_l = val1 & 0xFFFFFFFF; + + msg->value2_h = val2 >> 32; + msg->value2_l = val2 & 0xFFFFFFFF; +} + +/** + * hifc_sm_ctr_rd32 - small single 32 counter read + * @hwdev: the pointer to hw device + * @node: the node id + * @instance: instance value + * @ctr_id: counter id + * @value: read counter value ptr + * Return: 0 - success, negative - failure + */ +int hifc_sm_ctr_rd32(void *hwdev, u8 node, u8 instance, u32 ctr_id, u32 *value) +{ + struct chipif_sml_ctr_rd_req_s req; + union ctr_rd_rsp_u rsp; + int ret; + + if (!hwdev || !value) + return -EFAULT; + + hifc_sml_ctr_read_build_req(&req, instance, CHIPIF_SM_CTR_OP_READ, + CHIPIF_ACK, ctr_id, 0); + + ret = hifc_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), + (void *)&rsp, (unsigned short)sizeof(rsp)); + if (ret) { + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Sm 32bit counter read fail, err(%d)\n", ret); + return ret; + } + sml_ctr_htonl_n((u32 *)&rsp, 4); + *value = rsp.bs_ss32_rsp.value1; + + return 0; +} + +/** + * hifc_sm_ctr_rd32_clear - small single 32 counter read and clear to zero + * @hwdev: the pointer to hw device + * @node: the node id + * @instance: instance value + * @ctr_id: counter id + * @value: read counter value ptr + * Return: 0 - success, negative - failure + * according to ACN error code (ERR_OK, ERR_PARAM, ERR_FAILED...etc) + */ +int hifc_sm_ctr_rd32_clear(void *hwdev, u8 node, u8 instance, + u32 ctr_id, u32 *value) +{ + struct chipif_sml_ctr_rd_req_s req; + union ctr_rd_rsp_u rsp; + int ret; + + if (!hwdev || !value) + return -EFAULT; + + hifc_sml_ctr_read_build_req(&req, instance, + CHIPIF_SM_CTR_OP_READ_CLEAR, + CHIPIF_ACK, ctr_id, 0); + + ret = hifc_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), + (void *)&rsp, (unsigned short)sizeof(rsp)); + + if (ret) { + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Sm 32bit counter clear fail, err(%d)\n", ret); + return ret; + } + sml_ctr_htonl_n((u32 *)&rsp, 4); + *value = rsp.bs_ss32_rsp.value1; + + return 0; +} + +/** + * hifc_sm_ctr_wr32 - small single 32 counter write + * @hwdev: the pointer to hw device + * @node: the node id + * @instance: instance value + * @ctr_id: counter id + * @value: write counter value + * Return: 0 - success, negative - failure + */ +int hifc_sm_ctr_wr32(void *hwdev, u8 node, u8 instance, u32 ctr_id, u32 value) +{ + struct chipif_sml_ctr_wr_req_s req; + struct chipif_sml_ctr_wr_rsp_s rsp; + + if (!hwdev) + return -EFAULT; + + hifc_sml_ctr_write_build_req(&req, instance, CHIPIF_SM_CTR_OP_WRITE, + CHIPIF_NOACK, ctr_id, (u64)value, 0ULL); + + return hifc_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), (void *)&rsp, + (unsigned short)sizeof(rsp)); +} + +/** + * hifc_sm_ctr_rd64 - big counter 64 read + * @hwdev: the pointer to hw device + * @node: the node id + * @instance: instance value + * @ctr_id: counter id + * 
@value: read counter value ptr + * Return: 0 - success, negative - failure + */ +int hifc_sm_ctr_rd64(void *hwdev, u8 node, u8 instance, u32 ctr_id, u64 *value) +{ + struct chipif_sml_ctr_rd_req_s req; + union ctr_rd_rsp_u rsp; + int ret; + + if (!hwdev || !value) + return -EFAULT; + + hifc_sml_ctr_read_build_req(&req, instance, CHIPIF_SM_CTR_OP_READ, + CHIPIF_ACK, ctr_id, 0); + + ret = hifc_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), (void *)&rsp, + (unsigned short)sizeof(rsp)); + if (ret) { + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Sm 64bit counter read fail err(%d)\n", ret); + return ret; + } + sml_ctr_htonl_n((u32 *)&rsp, 4); + *value = ((u64)rsp.bs_bs64_rsp.value1 << 32) | rsp.bs_bs64_rsp.value2; + + return 0; +} + +/** + * hifc_sm_ctr_wr64 - big single 64 counter write + * @hwdev: the pointer to hw device + * @node: the node id + * @instance: instance value + * @ctr_id: counter id + * @value: write counter value + * Return: 0 - success, negative - failure + */ +int hifc_sm_ctr_wr64(void *hwdev, u8 node, u8 instance, u32 ctr_id, u64 value) +{ + struct chipif_sml_ctr_wr_req_s req; + struct chipif_sml_ctr_wr_rsp_s rsp; + + if (!hwdev) + return -EFAULT; + + hifc_sml_ctr_write_build_req(&req, instance, CHIPIF_SM_CTR_OP_WRITE, + CHIPIF_NOACK, ctr_id, value, 0ULL); + + return hifc_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), (void *)&rsp, + (unsigned short)sizeof(rsp)); +} + +/** + * hifc_sm_ctr_rd64_pair - big pair 128 counter read + * @hwdev: the pointer to hw device + * @node: the node id + * @instance: instance value + * @ctr_id: counter id + * @value1: read counter value ptr + * @value2: read counter value ptr + * Return: 0 - success, negative - failure + */ +int hifc_sm_ctr_rd64_pair(void *hwdev, u8 node, u8 instance, + u32 ctr_id, u64 *value1, u64 *value2) +{ + struct chipif_sml_ctr_rd_req_s req; + union ctr_rd_rsp_u rsp; + int ret; + + if (!hwdev || (0 != (ctr_id & 0x1)) || !value1 || !value2) { + pr_err("Hwdev(0x%p) or value1(0x%p) or value2(0x%p) is NULL or ctr_id(%d) is odd number\n", + hwdev, value1, value2, ctr_id); + return -EFAULT; + } + + hifc_sml_ctr_read_build_req(&req, instance, CHIPIF_SM_CTR_OP_READ, + CHIPIF_ACK, ctr_id, 0); + + ret = hifc_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), (void *)&rsp, + (unsigned short)sizeof(rsp)); + if (ret) { + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Sm 64 bit rd pair ret(%d)\n", ret); + return ret; + } + sml_ctr_htonl_n((u32 *)&rsp, 4); + *value1 = ((u64)rsp.bs_bp64_rsp.val1_h << 32) | rsp.bs_bp64_rsp.val1_l; + *value2 = ((u64)rsp.bs_bp64_rsp.val2_h << 32) | rsp.bs_bp64_rsp.val2_l; + + return 0; +} + +/** + * hifc_sm_ctr_wr64_pair - big pair 128 counter write + * @hwdev: the pointer to hw device + * @node: the node id + * @ctr_id: counter id + * @instance: instance value + * @value1: write counter value + * @value2: write counter value + * Return: 0 - success, negative - failure + */ +int hifc_sm_ctr_wr64_pair(void *hwdev, u8 node, u8 instance, + u32 ctr_id, u64 value1, u64 value2) +{ + struct chipif_sml_ctr_wr_req_s req; + struct chipif_sml_ctr_wr_rsp_s rsp; + + /* pair pattern ctr_id must be even number */ + if (!hwdev || (0 != (ctr_id & 0x1))) { + pr_err("Handle is NULL or ctr_id(%d) is odd number for write 64 bit pair\n", + ctr_id); + return -EFAULT; + } + + hifc_sml_ctr_write_build_req(&req, instance, CHIPIF_SM_CTR_OP_WRITE, + CHIPIF_NOACK, ctr_id, value1, value2); + return hifc_api_cmd_read_ack(hwdev, node, (u8 *)&req, + 
(unsigned short)sizeof(req), (void *)&rsp, + (unsigned short)sizeof(rsp)); +} + +int hifc_api_csr_rd32(void *hwdev, u8 dest, u32 addr, u32 *val) +{ + struct hifc_csr_request_api_data api_data = {0}; + u32 csr_val = 0; + u16 in_size = sizeof(api_data); + int ret; + + if (!hwdev || !val) + return -EFAULT; + + memset(&api_data, 0, sizeof(struct hifc_csr_request_api_data)); + api_data.dw0 = 0; + api_data.dw1.bits.operation_id = HIFC_CSR_OPERATION_READ_CSR; + api_data.dw1.bits.need_response = HIFC_CSR_NEED_RESP_DATA; + api_data.dw1.bits.data_size = HIFC_CSR_DATA_SZ_32; + api_data.dw1.val32 = cpu_to_be32(api_data.dw1.val32); + api_data.dw2.bits.csr_addr = addr; + api_data.dw2.val32 = cpu_to_be32(api_data.dw2.val32); + + ret = hifc_api_cmd_read_ack(hwdev, dest, (u8 *)(&api_data), + in_size, &csr_val, 4); + if (ret) { + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Read 32 bit csr fail, dest %d addr 0x%x, ret: 0x%x\n", + dest, addr, ret); + return ret; + } + + *val = csr_val; + + return 0; +} + +int hifc_api_csr_wr32(void *hwdev, u8 dest, u32 addr, u32 val) +{ + struct hifc_csr_request_api_data api_data; + u16 in_size = sizeof(api_data); + int ret; + + if (!hwdev) + return -EFAULT; + + memset(&api_data, 0, sizeof(struct hifc_csr_request_api_data)); + api_data.dw1.bits.operation_id = HIFC_CSR_OPERATION_WRITE_CSR; + api_data.dw1.bits.need_response = HIFC_CSR_NO_RESP_DATA; + api_data.dw1.bits.data_size = HIFC_CSR_DATA_SZ_32; + api_data.dw1.val32 = cpu_to_be32(api_data.dw1.val32); + api_data.dw2.bits.csr_addr = addr; + api_data.dw2.val32 = cpu_to_be32(api_data.dw2.val32); + api_data.csr_write_data_h = 0xffffffff; + api_data.csr_write_data_l = val; + + ret = hifc_api_cmd_write_nack(hwdev, dest, (u8 *)(&api_data), in_size); + if (ret) { + sdk_err(((struct hifc_hwdev *)hwdev)->dev_hdl, + "Write 32 bit csr fail! 
dest %d addr 0x%x val 0x%x\n", + dest, addr, val); + return ret; + } + + return 0; +} + diff --git a/drivers/scsi/huawei/hifc/hifc_sml.h b/drivers/scsi/huawei/hifc/hifc_sml.h new file mode 100644 index 000000000000..9fe2088f48a1 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_sml.h @@ -0,0 +1,183 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef __CHIPIF_SML_COUNTER_H__ +#define __CHIPIF_SML_COUNTER_H__ + +#define CHIPIF_FUNC_PF 0 +#define CHIPIF_FUNC_VF 1 +#define CHIPIF_FUNC_PPF 2 + +#define CHIPIF_ACK 1 +#define CHIPIF_NOACK 0 + +#define CHIPIF_SM_CTR_OP_READ 0x2 +#define CHIPIF_SM_CTR_OP_READ_CLEAR 0x6 +#define CHIPIF_SM_CTR_OP_WRITE 0x3 + +#define SMALL_CNT_READ_RSP_SIZE 16 + +/* request head */ +union chipif_sml_ctr_req_head_u { + struct { + u32 pad:15; + u32 ack:1; + u32 op_id:5; + u32 instance:6; + u32 src:5; + } bs; + + u32 value; +}; + +/* counter read request struct */ +struct chipif_sml_ctr_rd_req_s { + u32 extra; + union chipif_sml_ctr_req_head_u head; + u32 ctr_id; + u32 initial; + u32 pad; +}; + +/* counter read response union */ +union ctr_rd_rsp_u { + struct { + u32 value1:16; + u32 pad0:16; + u32 pad1[3]; + } bs_ss16_rsp; + + struct { + u32 value1; + u32 pad[3]; + } bs_ss32_rsp; + + struct { + u32 value1:20; + u32 pad0:12; + u32 value2:12; + u32 pad1:20; + u32 pad2[2]; + } bs_sp_rsp; + + struct { + u32 value1; + u32 value2; + u32 pad[2]; + } bs_bs64_rsp; + + struct { + u32 val1_h; + u32 val1_l; + u32 val2_h; + u32 val2_l; + } bs_bp64_rsp; + +}; + +/* resopnse head */ +union sml_ctr_rsp_head_u { + struct { + u32 pad:30; /* reserve */ + u32 code:2; /* error code */ + } bs; + + u32 value; +}; + +/* counter write request struct */ +struct chipif_sml_ctr_wr_req_s { + u32 extra; + union chipif_sml_ctr_req_head_u head; + u32 ctr_id; + u32 rsv1; + u32 rsv2; + u32 value1_h; + u32 value1_l; + u32 value2_h; + u32 value2_l; +}; + +/* counter write response struct */ +struct chipif_sml_ctr_wr_rsp_s { + union sml_ctr_rsp_head_u head; + u32 pad[3]; +}; + +enum HIFC_CSR_API_DATA_OPERATION_ID { + HIFC_CSR_OPERATION_WRITE_CSR = 0x1E, + HIFC_CSR_OPERATION_READ_CSR = 0x1F +}; + +enum HIFC_CSR_API_DATA_NEED_RESPONSE_DATA { + HIFC_CSR_NO_RESP_DATA = 0, + HIFC_CSR_NEED_RESP_DATA = 1 +}; + +enum HIFC_CSR_API_DATA_DATA_SIZE { + HIFC_CSR_DATA_SZ_32 = 0, + HIFC_CSR_DATA_SZ_64 = 1 +}; + +struct hifc_csr_request_api_data { + u32 dw0; + + union { + struct { + u32 reserved1:13; + /* this field indicates the write/read data size: + * 2'b00: 32 bits + * 2'b01: 64 bits + * 2'b10~2'b11:reserved + */ + u32 data_size:2; + /* this field indicates that requestor expect receive a + * response data or not. + * 1'b0: expect not to receive a response data. + * 1'b1: expect to receive a response data. + */ + u32 need_response:1; + /* this field indicates the operation that the requestor + * expected. + * 5'b1_1110: write value to csr space. + * 5'b1_1111: read register from csr space. + */ + u32 operation_id:5; + u32 reserved2:6; + /* this field specifies the Src node ID for this API + * request message. + */ + u32 src_node_id:5; + } bits; + + u32 val32; + } dw1; + + union { + struct { + /* it specifies the CSR address. */ + u32 csr_addr:26; + u32 reserved3:6; + } bits; + + u32 val32; + } dw2; + + /* if data_size=2'b01, it is high 32 bits of write data. else, it is + * 32'hFFFF_FFFF. + */ + u32 csr_write_data_h; + /* the low 32 bits of write data. 
*/ + u32 csr_write_data_l; +}; + +int hifc_sm_ctr_rd32(void *hwdev, u8 node, u8 instance, u32 ctr_id, u32 *value); +int hifc_sm_ctr_rd64(void *hwdev, u8 node, u8 instance, u32 ctr_id, u64 *value); +int hifc_sm_ctr_rd64_pair(void *hwdev, u8 node, u8 instance, + u32 ctr_id, u64 *value1, u64 *value2); + +#endif + diff --git a/drivers/scsi/huawei/hifc/hifc_wq.c b/drivers/scsi/huawei/hifc/hifc_wq.c new file mode 100644 index 000000000000..4e926d140b2c --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_wq.c @@ -0,0 +1,624 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/device.h> +#include <linux/vmalloc.h> +#include <linux/types.h> +#include <linux/atomic.h> +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +#include "hifc_knl_adp.h" +#include "hifc_hw.h" +#include "hifc_hwif.h" +#include "hifc_wq.h" + +#define WQS_MAX_NUM_BLOCKS 128 +#define WQS_FREE_BLOCKS_SIZE(wqs) (WQS_MAX_NUM_BLOCKS * \ + sizeof((wqs)->free_blocks[0])) + +static void wqs_return_block(struct hifc_wqs *wqs, u32 page_idx, u32 block_idx) +{ + u32 pos; + + spin_lock(&wqs->alloc_blocks_lock); + + wqs->num_free_blks++; + + pos = wqs->return_blk_pos++; + pos &= WQS_MAX_NUM_BLOCKS - 1; + + wqs->free_blocks[pos].page_idx = page_idx; + wqs->free_blocks[pos].block_idx = block_idx; + + spin_unlock(&wqs->alloc_blocks_lock); +} + +static int wqs_next_block(struct hifc_wqs *wqs, u32 *page_idx, + u32 *block_idx) +{ + u32 pos; + + spin_lock(&wqs->alloc_blocks_lock); + + if (wqs->num_free_blks <= 0) { + spin_unlock(&wqs->alloc_blocks_lock); + return -ENOMEM; + } + wqs->num_free_blks--; + + pos = wqs->alloc_blk_pos++; + pos &= WQS_MAX_NUM_BLOCKS - 1; + + *page_idx = wqs->free_blocks[pos].page_idx; + *block_idx = wqs->free_blocks[pos].block_idx; + + wqs->free_blocks[pos].page_idx = 0xFFFFFFFF; + wqs->free_blocks[pos].block_idx = 0xFFFFFFFF; + + spin_unlock(&wqs->alloc_blocks_lock); + + return 0; +} + +static int queue_alloc_page(void *handle, u64 **vaddr, u64 *paddr, + u64 **shadow_vaddr, u64 page_sz) +{ + dma_addr_t dma_addr = 0; + + *vaddr = dma_zalloc_coherent(handle, page_sz, &dma_addr, + GFP_KERNEL); + if (!*vaddr) { + sdk_err(handle, "Failed to allocate dma to wqs page\n"); + return -ENOMEM; + } + + if (!ADDR_4K_ALIGNED(dma_addr)) { + sdk_err(handle, "Cla is not 4k aligned!\n"); + goto shadow_vaddr_err; + } + + *paddr = (u64)dma_addr; + + /* use vzalloc for big mem, shadow_vaddr only used at initialization */ + *shadow_vaddr = vzalloc(page_sz); + if (!*shadow_vaddr) { + sdk_err(handle, "Failed to allocate shadow page vaddr\n"); + goto shadow_vaddr_err; + } + + return 0; + +shadow_vaddr_err: + dma_free_coherent(handle, page_sz, *vaddr, dma_addr); + return -ENOMEM; +} + +static int wqs_allocate_page(struct hifc_wqs *wqs, u32 page_idx) +{ + return queue_alloc_page(wqs->dev_hdl, &wqs->page_vaddr[page_idx], + &wqs->page_paddr[page_idx], + &wqs->shadow_page_vaddr[page_idx], + WQS_PAGE_SIZE); +} + +static void wqs_free_page(struct hifc_wqs *wqs, u32 page_idx) +{ + dma_free_coherent(wqs->dev_hdl, WQS_PAGE_SIZE, + wqs->page_vaddr[page_idx], + (dma_addr_t)wqs->page_paddr[page_idx]); + vfree(wqs->shadow_page_vaddr[page_idx]); +} + +static int cmdq_allocate_page(struct hifc_cmdq_pages *cmdq_pages) +{ + return queue_alloc_page(cmdq_pages->dev_hdl, + &cmdq_pages->cmdq_page_vaddr, 
+ &cmdq_pages->cmdq_page_paddr, + &cmdq_pages->cmdq_shadow_page_vaddr, + CMDQ_PAGE_SIZE); +} + +static void cmdq_free_page(struct hifc_cmdq_pages *cmdq_pages) +{ + dma_free_coherent(cmdq_pages->dev_hdl, CMDQ_PAGE_SIZE, + cmdq_pages->cmdq_page_vaddr, + (dma_addr_t)cmdq_pages->cmdq_page_paddr); + vfree(cmdq_pages->cmdq_shadow_page_vaddr); +} + +static int alloc_wqes_shadow(struct hifc_wq *wq) +{ + u64 size; + + /* if wq->max_wqe_size == 0, we don't need to alloc shadow */ + if (wq->max_wqe_size <= wq->wqebb_size) + return 0; + + size = (u64)wq->num_q_pages * wq->max_wqe_size; + wq->shadow_wqe = kzalloc(size, GFP_KERNEL); + if (!wq->shadow_wqe) { + pr_err("Failed to allocate shadow wqe\n"); + return -ENOMEM; + } + + size = wq->num_q_pages * sizeof(wq->prod_idx); + wq->shadow_idx = kzalloc(size, GFP_KERNEL); + if (!wq->shadow_idx) { + pr_err("Failed to allocate shadow index\n"); + goto shadow_idx_err; + } + + return 0; + +shadow_idx_err: + kfree(wq->shadow_wqe); + return -ENOMEM; +} + +static void free_wqes_shadow(struct hifc_wq *wq) +{ + if (wq->max_wqe_size <= wq->wqebb_size) + return; + + kfree(wq->shadow_idx); + kfree(wq->shadow_wqe); +} + +static void free_wq_pages(void *handle, struct hifc_wq *wq, + u32 num_q_pages) +{ + u32 i; + + for (i = 0; i < num_q_pages; i++) + hifc_dma_free_coherent_align(handle, &wq->mem_align[i]); + + free_wqes_shadow(wq); + + wq->block_vaddr = NULL; + wq->shadow_block_vaddr = NULL; + + kfree(wq->mem_align); +} + +static int alloc_wq_pages(void *dev_hdl, struct hifc_wq *wq) +{ + struct hifc_dma_addr_align *mem_align; + u64 *vaddr, *paddr; + u32 i, num_q_pages; + int err; + + vaddr = wq->shadow_block_vaddr; + paddr = wq->block_vaddr; + + num_q_pages = ALIGN(WQ_SIZE(wq), wq->wq_page_size) / wq->wq_page_size; + if (num_q_pages > WQ_MAX_PAGES) { + sdk_err(dev_hdl, "Number(%d) wq pages exceeds the limit\n", + num_q_pages); + return -EINVAL; + } + + if (num_q_pages & (num_q_pages - 1)) { + sdk_err(dev_hdl, "Wq num(%d) q pages must be power of 2\n", + num_q_pages); + return -EINVAL; + } + + wq->num_q_pages = num_q_pages; + + err = alloc_wqes_shadow(wq); + if (err) { + sdk_err(dev_hdl, "Failed to allocate wqe shadow\n"); + return err; + } + + wq->mem_align = kcalloc(wq->num_q_pages, sizeof(*wq->mem_align), + GFP_KERNEL); + if (!wq->mem_align) { + sdk_err(dev_hdl, "Failed to allocate mem_align\n"); + free_wqes_shadow(wq); + return -ENOMEM; + } + + for (i = 0; i < num_q_pages; i++) { + mem_align = &wq->mem_align[i]; + err = hifc_dma_zalloc_coherent_align(dev_hdl, wq->wq_page_size, + wq->wq_page_size, + GFP_KERNEL, mem_align); + if (err) { + sdk_err(dev_hdl, "Failed to allocate wq page\n"); + goto alloc_wq_pages_err; + } + + *paddr = cpu_to_be64(mem_align->align_paddr); + *vaddr = (u64)mem_align->align_vaddr; + + paddr++; + vaddr++; + } + + return 0; + +alloc_wq_pages_err: + free_wq_pages(dev_hdl, wq, i); + + return -ENOMEM; +} + +int hifc_wq_allocate(struct hifc_wqs *wqs, struct hifc_wq *wq, + u32 wqebb_size, u32 wq_page_size, u16 q_depth, + u32 max_wqe_size) +{ + u32 num_wqebbs_per_page; + int err; + + if (wqebb_size == 0) { + sdk_err(wqs->dev_hdl, "Wqebb_size must be >0\n"); + return -EINVAL; + } + + if (q_depth & (q_depth - 1)) { + sdk_err(wqs->dev_hdl, "Wq q_depth(%d) isn't power of 2\n", + q_depth); + return -EINVAL; + } + + if (wq_page_size & (wq_page_size - 1)) { + sdk_err(wqs->dev_hdl, "Wq page_size(%d) isn't power of 2\n", + wq_page_size); + return -EINVAL; + } + + num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size; + + if (num_wqebbs_per_page & 
(num_wqebbs_per_page - 1)) { + sdk_err(wqs->dev_hdl, "Num(%d) wqebbs per page isn't power of 2\n", + num_wqebbs_per_page); + return -EINVAL; + } + + err = wqs_next_block(wqs, &wq->page_idx, &wq->block_idx); + if (err) { + sdk_err(wqs->dev_hdl, "Failed to get free wqs next block\n"); + return err; + } + + wq->wqebb_size = wqebb_size; + wq->wq_page_size = wq_page_size; + wq->q_depth = q_depth; + wq->max_wqe_size = max_wqe_size; + wq->num_wqebbs_per_page = num_wqebbs_per_page; + + wq->wqebbs_per_page_shift = (u32)ilog2(num_wqebbs_per_page); + + wq->block_vaddr = WQ_BASE_VADDR(wqs, wq); + wq->shadow_block_vaddr = WQ_BASE_ADDR(wqs, wq); + wq->block_paddr = WQ_BASE_PADDR(wqs, wq); + + err = alloc_wq_pages(wqs->dev_hdl, wq); + if (err) { + sdk_err(wqs->dev_hdl, "Failed to allocate wq pages\n"); + goto alloc_wq_pages_err; + } + + atomic_set(&wq->delta, q_depth); + wq->cons_idx = 0; + wq->prod_idx = 0; + wq->mask = q_depth - 1; + + return 0; + +alloc_wq_pages_err: + wqs_return_block(wqs, wq->page_idx, wq->block_idx); + return err; +} + +void hifc_wq_free(struct hifc_wqs *wqs, struct hifc_wq *wq) +{ + free_wq_pages(wqs->dev_hdl, wq, wq->num_q_pages); + + wqs_return_block(wqs, wq->page_idx, wq->block_idx); +} + +static void init_wqs_blocks_arr(struct hifc_wqs *wqs) +{ + u32 page_idx, blk_idx, pos = 0; + + for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) { + for (blk_idx = 0; blk_idx < WQS_BLOCKS_PER_PAGE; blk_idx++) { + wqs->free_blocks[pos].page_idx = page_idx; + wqs->free_blocks[pos].block_idx = blk_idx; + pos++; + } + } + + wqs->alloc_blk_pos = 0; + wqs->return_blk_pos = 0; + wqs->num_free_blks = WQS_MAX_NUM_BLOCKS; + spin_lock_init(&wqs->alloc_blocks_lock); +} + +void hifc_wq_wqe_pg_clear(struct hifc_wq *wq) +{ + u64 *block_vaddr; + u32 pg_idx; + + block_vaddr = wq->shadow_block_vaddr; + + atomic_set(&wq->delta, wq->q_depth); + wq->cons_idx = 0; + wq->prod_idx = 0; + + for (pg_idx = 0; pg_idx < wq->num_q_pages; pg_idx++) + memset((void *)(*(block_vaddr + pg_idx)), 0, wq->wq_page_size); +} + +int hifc_cmdq_alloc(struct hifc_cmdq_pages *cmdq_pages, + struct hifc_wq *wq, void *dev_hdl, + int cmdq_blocks, u32 wq_page_size, u32 wqebb_size, + u16 q_depth, u32 max_wqe_size) +{ + int i, j, err = -ENOMEM; + + if (q_depth & (q_depth - 1)) { + sdk_err(dev_hdl, "Cmdq q_depth(%d) isn't power of 2\n", + q_depth); + return -EINVAL; + } + + cmdq_pages->dev_hdl = dev_hdl; + + err = cmdq_allocate_page(cmdq_pages); + if (err) { + sdk_err(dev_hdl, "Failed to allocate CMDQ page\n"); + return err; + } + + for (i = 0; i < cmdq_blocks; i++) { + wq[i].page_idx = 0; + wq[i].block_idx = (u32)i; + wq[i].wqebb_size = wqebb_size; + wq[i].wq_page_size = wq_page_size; + wq[i].q_depth = q_depth; + wq[i].max_wqe_size = max_wqe_size; + wq[i].num_wqebbs_per_page = + ALIGN(wq_page_size, wqebb_size) / wqebb_size; + + wq[i].wqebbs_per_page_shift = + (u32)ilog2(wq[i].num_wqebbs_per_page); + + wq[i].block_vaddr = CMDQ_BASE_VADDR(cmdq_pages, &wq[i]); + wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]); + wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]); + + err = alloc_wq_pages(cmdq_pages->dev_hdl, &wq[i]); + if (err) { + sdk_err(dev_hdl, "Failed to alloc CMDQ blocks\n"); + goto cmdq_block_err; + } + + atomic_set(&wq[i].delta, q_depth); + wq[i].cons_idx = 0; + wq[i].prod_idx = 0; + wq[i].mask = q_depth - 1; + } + + return 0; + +cmdq_block_err: + for (j = 0; j < i; j++) + free_wq_pages(cmdq_pages->dev_hdl, &wq[j], wq[j].num_q_pages); + + cmdq_free_page(cmdq_pages); + return err; +} + +void hifc_cmdq_free(struct 
hifc_cmdq_pages *cmdq_pages, + struct hifc_wq *wq, int cmdq_blocks) +{ + int i; + + for (i = 0; i < cmdq_blocks; i++) + free_wq_pages(cmdq_pages->dev_hdl, &wq[i], wq[i].num_q_pages); + + cmdq_free_page(cmdq_pages); +} + +static int alloc_page_addr(struct hifc_wqs *wqs) +{ + u64 size = wqs->num_pages * sizeof(*wqs->page_paddr); + + wqs->page_paddr = kzalloc(size, GFP_KERNEL); + if (!wqs->page_paddr) + return -ENOMEM; + + size = wqs->num_pages * sizeof(*wqs->page_vaddr); + wqs->page_vaddr = kzalloc(size, GFP_KERNEL); + if (!wqs->page_vaddr) + goto page_vaddr_err; + + size = wqs->num_pages * sizeof(*wqs->shadow_page_vaddr); + wqs->shadow_page_vaddr = kzalloc(size, GFP_KERNEL); + if (!wqs->shadow_page_vaddr) + goto page_shadow_vaddr_err; + + return 0; + +page_shadow_vaddr_err: + kfree(wqs->page_vaddr); + +page_vaddr_err: + kfree(wqs->page_paddr); + return -ENOMEM; +} + +static void free_page_addr(struct hifc_wqs *wqs) +{ + kfree(wqs->shadow_page_vaddr); + kfree(wqs->page_vaddr); + kfree(wqs->page_paddr); +} + +int hifc_wqs_alloc(struct hifc_wqs *wqs, int num_wqs, void *dev_hdl) +{ + u32 i, page_idx; + int err; + + wqs->dev_hdl = dev_hdl; + wqs->num_pages = WQ_NUM_PAGES(num_wqs); + + if (alloc_page_addr(wqs)) { + sdk_err(dev_hdl, "Failed to allocate mem for page addresses\n"); + return -ENOMEM; + } + + for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) { + err = wqs_allocate_page(wqs, page_idx); + if (err) { + sdk_err(dev_hdl, "Failed wq page allocation\n"); + goto wq_allocate_page_err; + } + } + + wqs->free_blocks = kzalloc(WQS_FREE_BLOCKS_SIZE(wqs), GFP_KERNEL); + if (!wqs->free_blocks) { + err = -ENOMEM; + goto alloc_blocks_err; + } + + init_wqs_blocks_arr(wqs); + return 0; + +alloc_blocks_err: +wq_allocate_page_err: + for (i = 0; i < page_idx; i++) + wqs_free_page(wqs, i); + + free_page_addr(wqs); + return err; +} + +void hifc_wqs_free(struct hifc_wqs *wqs) +{ + u32 page_idx; + + for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) + wqs_free_page(wqs, page_idx); + + free_page_addr(wqs); + kfree(wqs->free_blocks); +} + +static void copy_wqe_to_shadow(struct hifc_wq *wq, void *shadow_addr, + int num_wqebbs, u16 prod_idx) +{ + u8 *shadow_wqebb_addr, *wqe_page_addr, *wqebb_addr; + u32 i, offset; + u16 idx; + + for (i = 0; i < (u32)num_wqebbs; i++) { + offset = i * wq->wqebb_size; + shadow_wqebb_addr = (u8 *)shadow_addr + offset; + + idx = MASKED_WQE_IDX(wq, prod_idx + i); + wqe_page_addr = WQ_PAGE_ADDR(wq, idx); + wqebb_addr = wqe_page_addr + + WQE_PAGE_OFF(wq, MASKED_WQE_IDX(wq, idx)); + + memcpy(shadow_wqebb_addr, wqebb_addr, wq->wqebb_size); + } +} + +void *hifc_get_wqebb_addr(struct hifc_wq *wq, u16 index) +{ + return WQ_PAGE_ADDR(wq, index) + WQE_PAGE_OFF(wq, index); +} + +u64 hifc_get_first_wqe_page_addr(struct hifc_wq *wq) +{ + return be64_to_cpu(*wq->block_vaddr); +} + +void *hifc_get_wqe(struct hifc_wq *wq, int num_wqebbs, u16 *prod_idx) +{ + u32 curr_pg, end_pg; + u16 curr_prod_idx, end_prod_idx; + + if (atomic_sub_return(num_wqebbs, &wq->delta) < 0) { + atomic_add(num_wqebbs, &wq->delta); + return NULL; + } + + /* use original cur_pi and end_pi, no need queue depth mask as + * WQE_PAGE_NUM will do num_queue_pages mask + */ + curr_prod_idx = (u16)wq->prod_idx; + wq->prod_idx += num_wqebbs; + + /* end prod index should points to the last wqebb of wqe, + * therefore minus 1 + */ + end_prod_idx = (u16)wq->prod_idx - 1; + + curr_pg = WQE_PAGE_NUM(wq, curr_prod_idx); + end_pg = WQE_PAGE_NUM(wq, end_prod_idx); + + *prod_idx = MASKED_WQE_IDX(wq, curr_prod_idx); + + /* If we only 
have one page, still need to get shadown wqe when + * wqe rolling-over page + */ + if (curr_pg != end_pg || MASKED_WQE_IDX(wq, end_prod_idx) < *prod_idx) { + u32 offset = curr_pg * wq->max_wqe_size; + u8 *shadow_addr = wq->shadow_wqe + offset; + + wq->shadow_idx[curr_pg] = *prod_idx; + return shadow_addr; + } + + return WQ_PAGE_ADDR(wq, *prod_idx) + WQE_PAGE_OFF(wq, *prod_idx); +} + +void hifc_put_wqe(struct hifc_wq *wq, int num_wqebbs) +{ + atomic_add(num_wqebbs, &wq->delta); + wq->cons_idx += num_wqebbs; +} + +void *hifc_read_wqe(struct hifc_wq *wq, int num_wqebbs, u16 *cons_idx) +{ + u32 curr_pg, end_pg; + u16 curr_cons_idx, end_cons_idx; + + if ((atomic_read(&wq->delta) + num_wqebbs) > wq->q_depth) + return NULL; + + curr_cons_idx = (u16)wq->cons_idx; + + curr_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx); + end_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx + num_wqebbs - 1); + + curr_pg = WQE_PAGE_NUM(wq, curr_cons_idx); + end_pg = WQE_PAGE_NUM(wq, end_cons_idx); + + *cons_idx = curr_cons_idx; + + if (curr_pg != end_pg) { + u32 offset = curr_pg * wq->max_wqe_size; + u8 *shadow_addr = wq->shadow_wqe + offset; + + copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx); + + return shadow_addr; + } + + return WQ_PAGE_ADDR(wq, *cons_idx) + WQE_PAGE_OFF(wq, *cons_idx); +} diff --git a/drivers/scsi/huawei/hifc/hifc_wq.h b/drivers/scsi/huawei/hifc/hifc_wq.h new file mode 100644 index 000000000000..207d54191afa --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_wq.h @@ -0,0 +1,165 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef HIFC_WQ_H +#define HIFC_WQ_H + +#define WQS_BLOCKS_PER_PAGE 4 +#define WQ_SIZE(wq) (u32)((u64)(wq)->q_depth * (wq)->wqebb_size) + +#define WQE_PAGE_NUM(wq, idx) (((idx) >> ((wq)->wqebbs_per_page_shift)) & \ + ((wq)->num_q_pages - 1)) + +#define WQE_PAGE_OFF(wq, idx) ((u64)((wq)->wqebb_size) * \ + ((idx) & ((wq)->num_wqebbs_per_page - 1))) + +#define WQ_PAGE_ADDR_SIZE sizeof(u64) +#define WQ_PAGE_ADDR_SIZE_SHIFT 3 +#define WQ_PAGE_ADDR(wq, idx) \ + (u8 *)(*(u64 *)((u64)((wq)->shadow_block_vaddr) + \ + (WQE_PAGE_NUM(wq, idx) << WQ_PAGE_ADDR_SIZE_SHIFT))) + +#define WQ_BLOCK_SIZE 4096UL +#define WQS_PAGE_SIZE (WQS_BLOCKS_PER_PAGE * WQ_BLOCK_SIZE) +#define WQ_MAX_PAGES (WQ_BLOCK_SIZE >> WQ_PAGE_ADDR_SIZE_SHIFT) + +#define CMDQ_BLOCKS_PER_PAGE 8 +#define CMDQ_BLOCK_SIZE 512UL +#define CMDQ_PAGE_SIZE ALIGN((CMDQ_BLOCKS_PER_PAGE * \ + CMDQ_BLOCK_SIZE), PAGE_SIZE) + +#define ADDR_4K_ALIGNED(addr) (((addr) & 0xfff) == 0) + +#define WQ_BASE_VADDR(wqs, wq) \ + (u64 *)(((u64)((wqs)->page_vaddr[(wq)->page_idx])) \ + + (wq)->block_idx * WQ_BLOCK_SIZE) + +#define WQ_BASE_PADDR(wqs, wq) (((wqs)->page_paddr[(wq)->page_idx]) \ + + (u64)(wq)->block_idx * WQ_BLOCK_SIZE) + +#define WQ_BASE_ADDR(wqs, wq) \ + (u64 *)(((u64)((wqs)->shadow_page_vaddr[(wq)->page_idx])) \ + + (wq)->block_idx * WQ_BLOCK_SIZE) + +#define CMDQ_BASE_VADDR(cmdq_pages, wq) \ + (u64 *)(((u64)((cmdq_pages)->cmdq_page_vaddr)) \ + + (wq)->block_idx * CMDQ_BLOCK_SIZE) + +#define CMDQ_BASE_PADDR(cmdq_pages, wq) \ + (((u64)((cmdq_pages)->cmdq_page_paddr)) \ + + (u64)(wq)->block_idx * CMDQ_BLOCK_SIZE) + +#define CMDQ_BASE_ADDR(cmdq_pages, wq) \ + (u64 *)(((u64)((cmdq_pages)->cmdq_shadow_page_vaddr)) \ + + (wq)->block_idx * CMDQ_BLOCK_SIZE) + +#define MASKED_WQE_IDX(wq, idx) ((idx) & (wq)->mask) + +#define WQ_NUM_PAGES(num_wqs) \ + (ALIGN((u32)num_wqs, WQS_BLOCKS_PER_PAGE) / WQS_BLOCKS_PER_PAGE) + +#define 
MAX_WQE_SIZE(max_sge, wqebb_size) \ + ((max_sge <= 2) ? (wqebb_size) : \ + ((ALIGN(((max_sge) - 2), 4) / 4 + 1) * (wqebb_size))) + +struct hifc_free_block { + u32 page_idx; + u32 block_idx; +}; + +struct hifc_wq { + /* The addresses are 64 bit in the HW */ + u64 block_paddr; + u64 *shadow_block_vaddr; + u64 *block_vaddr; + + u32 wqebb_size; + u32 wq_page_size; + u16 q_depth; + u32 max_wqe_size; + u32 num_wqebbs_per_page; + + /* performance: replace mul/div as shift; + * num_wqebbs_per_page must be power of 2 + */ + u32 wqebbs_per_page_shift; + u32 page_idx; + u32 block_idx; + + u32 num_q_pages; + + struct hifc_dma_addr_align *mem_align; + + int cons_idx; + int prod_idx; + + atomic_t delta; + u16 mask; + + u8 *shadow_wqe; + u16 *shadow_idx; +}; + +struct hifc_cmdq_pages { + /* The addresses are 64 bit in the HW */ + u64 cmdq_page_paddr; + u64 *cmdq_page_vaddr; + u64 *cmdq_shadow_page_vaddr; + + void *dev_hdl; +}; + +struct hifc_wqs { + /* The addresses are 64 bit in the HW */ + u64 *page_paddr; + u64 **page_vaddr; + u64 **shadow_page_vaddr; + + struct hifc_free_block *free_blocks; + u32 alloc_blk_pos; + u32 return_blk_pos; + int num_free_blks; + + /* for allocate blocks */ + spinlock_t alloc_blocks_lock; + + u32 num_pages; + + void *dev_hdl; +}; + +void hifc_wq_wqe_pg_clear(struct hifc_wq *wq); + +int hifc_cmdq_alloc(struct hifc_cmdq_pages *cmdq_pages, + struct hifc_wq *wq, void *dev_hdl, + int cmdq_blocks, u32 wq_page_size, u32 wqebb_size, + u16 q_depth, u32 max_wqe_size); + +void hifc_cmdq_free(struct hifc_cmdq_pages *cmdq_pages, + struct hifc_wq *wq, int cmdq_blocks); + +int hifc_wqs_alloc(struct hifc_wqs *wqs, int num_wqs, void *dev_hdl); + +void hifc_wqs_free(struct hifc_wqs *wqs); + +int hifc_wq_allocate(struct hifc_wqs *wqs, struct hifc_wq *wq, + u32 wqebb_size, u32 wq_page_size, u16 q_depth, + u32 max_wqe_size); + +void hifc_wq_free(struct hifc_wqs *wqs, struct hifc_wq *wq); + +void *hifc_get_wqebb_addr(struct hifc_wq *wq, u16 index); + +u64 hifc_get_first_wqe_page_addr(struct hifc_wq *wq); + +void *hifc_get_wqe(struct hifc_wq *wq, int num_wqebbs, u16 *prod_idx); + +void hifc_put_wqe(struct hifc_wq *wq, int num_wqebbs); + +void *hifc_read_wqe(struct hifc_wq *wq, int num_wqebbs, u16 *cons_idx); + +#endif +
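A minimal sketch of the intended call order for the work-queue helpers declared above (illustrative only, not part of the patch). The sizes are made-up example values, real callers such as the cmdq setup pick hardware-specific ones, dev_hdl is an opaque device handle, and error handling is trimmed:

static void example_wq_usage(void *dev_hdl)
{
	struct hifc_wqs wqs;
	struct hifc_wq wq;
	void *wqe;
	u16 pi;

	hifc_wqs_alloc(&wqs, 1, dev_hdl);	/* back one WQ with WQS pages */
	/* 64B WQEBB, 4KB WQ page, depth 256, max WQE size 64B */
	hifc_wq_allocate(&wqs, &wq, 64, 0x1000, 256, 64);

	wqe = hifc_get_wqe(&wq, 1, &pi);	/* reserve one WQEBB at PI */
	if (wqe) {
		/* ... fill the WQE and hand it to the queue owner ... */
		hifc_put_wqe(&wq, 1);		/* return it once consumed */
	}

	hifc_wq_free(&wqs, &wq);
	hifc_wqs_free(&wqs);
}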
From: Chenguangli chenguangli2@huawei.com
driver inclusion category: feature bugzilla: NA
-----------------------------------------------------------------------
This module implements the tool channel used to process tool commands, as well as the management commands used to manage nport and hba resources.
Signed-off-by: Chenguangli chenguangli2@huawei.com Reviewed-by: Zengweiliang zengweiliang.zengweiliang@huawei.com Acked-by: Hanjun Guo guohanjun@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/scsi/huawei/hifc/hifc_dbgtool_knl.c | 952 ++++ drivers/scsi/huawei/hifc/hifc_dbgtool_knl.h | 107 + drivers/scsi/huawei/hifc/hifc_lld.c | 895 +++ drivers/scsi/huawei/hifc/hifc_lld.h | 79 + drivers/scsi/huawei/hifc/hifc_portmng.c | 1273 +++++ drivers/scsi/huawei/hifc/hifc_portmng.h | 223 + drivers/scsi/huawei/hifc/hifc_tool.c | 785 +++ drivers/scsi/huawei/hifc/hifc_tool.h | 332 ++ drivers/scsi/huawei/hifc/hifc_tool_hw.c | 1010 ++++ drivers/scsi/huawei/hifc/unf_portman.c | 5565 +++++++++++++++++++ drivers/scsi/huawei/hifc/unf_portman.h | 305 + 11 files changed, 11526 insertions(+) create mode 100644 drivers/scsi/huawei/hifc/hifc_dbgtool_knl.c create mode 100644 drivers/scsi/huawei/hifc/hifc_dbgtool_knl.h create mode 100644 drivers/scsi/huawei/hifc/hifc_lld.c create mode 100644 drivers/scsi/huawei/hifc/hifc_lld.h create mode 100644 drivers/scsi/huawei/hifc/hifc_portmng.c create mode 100644 drivers/scsi/huawei/hifc/hifc_portmng.h create mode 100644 drivers/scsi/huawei/hifc/hifc_tool.c create mode 100644 drivers/scsi/huawei/hifc/hifc_tool.h create mode 100644 drivers/scsi/huawei/hifc/hifc_tool_hw.c create mode 100644 drivers/scsi/huawei/hifc/unf_portman.c create mode 100644 drivers/scsi/huawei/hifc/unf_portman.h
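To make the tool channel above concrete, the following is a hypothetical user-space sketch that drives the dbgtool character device added by this patch to read the FFM records. The device path, the dbgtool_param and ffm_record_info layouts, and the DBGTOOL_CMD_FFM_RD command number come from hifc_dbgtool_knl.c/.h below; everything else is an assumption. In particular, the kernel side only decodes _IOC_NR(cmd), so the DBGTOOL_IOCTL magic and direction chosen here are illustrative placeholders, and the kernel header is assumed to be usable from user space (with u8/u16/u32/u64 typedefs provided).

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "hifc_dbgtool_knl.h"	/* assumed visible to user space */

/* Placeholder encoding: only the command number matters to the driver,
 * which extracts it with _IOC_NR(cmd) in process_dbgtool_cmd().
 */
#define DBGTOOL_IOCTL(nr)	_IOWR('D', (nr), struct dbgtool_param)

static int read_ffm_records(const char *chip_name)
{
	struct ffm_record_info ffm;
	struct dbgtool_param param;
	int fd, ret;

	fd = open("/dev/hifc_dbgtool_chr_dev", O_RDWR);
	if (fd < 0)
		return -1;

	memset(&param, 0, sizeof(param));
	memset(&ffm, 0, sizeof(ffm));
	strncpy(param.chip_name, chip_name, sizeof(param.chip_name) - 1);
	param.param.ffm_rd = &ffm;	/* kernel copies the records here */

	ret = ioctl(fd, DBGTOOL_IOCTL(DBGTOOL_CMD_FFM_RD), &param);
	if (ret == 0)
		printf("%u FFM record(s) recorded\n", ffm.ffm_num);

	close(fd);
	return ret;
}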
diff --git a/drivers/scsi/huawei/hifc/hifc_dbgtool_knl.c b/drivers/scsi/huawei/hifc/hifc_dbgtool_knl.c new file mode 100644 index 000000000000..a70b155b76f2 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_dbgtool_knl.c @@ -0,0 +1,952 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/time.h> +#include <linux/timex.h> +#include <linux/rtc.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/cdev.h> +#include <linux/fs.h> +#include <linux/slab.h> +#include <linux/device.h> +#include <linux/if.h> +#include <linux/ioctl.h> +#include <linux/pci.h> +#include <linux/fs.h> + +#include "hifc_knl_adp.h" +#include "hifc_hw.h" +#include "hifc_hwdev.h" +#include "hifc_hwif.h" +#include "hifc_api_cmd.h" +#include "hifc_mgmt.h" +#include "hifc_lld.h" +#include "hifc_tool.h" +#include "hifc_dbgtool_knl.h" + +struct ffm_intr_info { + u8 node_id; + /* error level of the interrupt source */ + u8 err_level; + /* Classification by interrupt source properties */ + u16 err_type; + u32 err_csr_addr; + u32 err_csr_value; +}; + +#define DBGTOOL_MSG_MAX_SIZE 2048ULL +#define HIFC_SELF_CMD_UP2PF_FFM 0x26 + +void *g_card_node_array[MAX_CARD_NUM] = {0}; +void *g_card_vir_addr[MAX_CARD_NUM] = {0}; +u64 g_card_phy_addr[MAX_CARD_NUM] = {0}; +/* lock for g_card_vir_addr */ +struct mutex g_addr_lock; +int card_id; + +/* dbgtool character device name, class name, dev path */ +#define CHR_DEV_DBGTOOL "hifc_dbgtool_chr_dev" +#define CLASS_DBGTOOL "hifc_dbgtool_class" +#define DBGTOOL_DEV_PATH "/dev/hifc_dbgtool_chr_dev" + +struct dbgtool_k_glb_info { + struct semaphore dbgtool_sem; + struct ffm_record_info *ffm; +}; + +dev_t dbgtool_dev_id; /* device id */ +struct cdev dbgtool_chr_dev; /* struct of char device */ + +/*lint -save -e104 -e808*/ +struct class *dbgtool_d_class; /* struct of char class */ +/*lint -restore*/ + +int g_dbgtool_init_flag; +int g_dbgtool_ref_cnt; + +static int dbgtool_knl_open(struct inode *pnode, + struct file *pfile) +{ + return 0; +} + +static int dbgtool_knl_release(struct inode *pnode, + struct file *pfile) +{ + return 0; +} + +static ssize_t dbgtool_knl_read(struct file *pfile, + char __user *ubuf, + size_t size, + loff_t *ppos) +{ + return 0; +} + +static ssize_t dbgtool_knl_write(struct file *pfile, + const char __user *ubuf, + size_t size, + loff_t *ppos) +{ + return 0; +} + +static bool is_valid_phy_addr(u64 offset) +{ + int i; + + for (i = 0; i < MAX_CARD_NUM; i++) { + if (offset == g_card_phy_addr[i]) + return true; + } + + return false; +} + +int hifc_mem_mmap(struct file *filp, struct vm_area_struct *vma) +{ + unsigned long vmsize = vma->vm_end - vma->vm_start; + phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT; + phys_addr_t phy_addr; + + if (vmsize > (PAGE_SIZE * (1 << DBGTOOL_PAGE_ORDER))) { + pr_err("Map size = %lu is bigger than alloc\n", vmsize); + return -EAGAIN; + } + + if (offset && !is_valid_phy_addr((u64)offset) && + !hifc_is_valid_bar_addr((u64)offset)) { + pr_err("offset is invalid"); + return -EAGAIN; + } + + /* old version of tool set vma->vm_pgoff to 0 */ + phy_addr = offset ? 
offset : g_card_phy_addr[card_id]; + if (!phy_addr) { + pr_err("Card_id = %d physical address is 0\n", card_id); + return -EAGAIN; + } + + if (remap_pfn_range(vma, vma->vm_start, + (phy_addr >> PAGE_SHIFT), + vmsize, vma->vm_page_prot)) + return -EAGAIN; + + return 0; +} + +/** + * dbgtool_knl_api_cmd_read - used for read operations + * @para: the dbgtool parameter + * @g_func_handle_array: global function handle + * Return: 0 - success, negative - failure + */ +long dbgtool_knl_api_cmd_read(struct dbgtool_param *para, + void **g_func_handle_array) +{ + long ret = 0; + u8 *cmd; + u16 size; + void *ack; + u16 ack_size; + u32 pf_id; + void *hwdev; + + pf_id = para->param.api_rd.pf_id; + if (pf_id >= 16) { + pr_err("PF id(0x%x) too big\n", pf_id); + return -EFAULT; + } + + /* obtaining pf_id chipif pointer */ + hwdev = g_func_handle_array[pf_id]; + if (!hwdev) { + pr_err("PF id(0x%x) handle null in api cmd read\n", pf_id); + return -EFAULT; + } + + /* alloc cmd and ack memory */ + size = para->param.api_rd.size; + if (para->param.api_rd.size == 0 || size > DBGTOOL_MSG_MAX_SIZE) { + pr_err("Read cmd size invalid or more than 2K\n"); + return -EINVAL; + } + cmd = kzalloc((unsigned long long)size, GFP_KERNEL); + if (!cmd) { + pr_err("Alloc read cmd mem fail\n"); + return -ENOMEM; + } + + ack_size = para->param.api_rd.ack_size; + if (para->param.api_rd.ack_size == 0 || + ack_size > DBGTOOL_MSG_MAX_SIZE) { + pr_err("Read cmd ack size is 0\n"); + ret = -ENOMEM; + goto alloc_ack_mem_fail; + } + + ack = kzalloc((unsigned long long)ack_size, GFP_KERNEL); + if (!ack) { + pr_err("Alloc read ack mem fail\n"); + ret = -ENOMEM; + goto alloc_ack_mem_fail; + } + + /* cmd content copied from user-mode */ + if (copy_from_user(cmd, para->param.api_rd.cmd, (unsigned long)size)) { + pr_err("Copy cmd from user fail\n"); + ret = -EFAULT; + goto copy_user_cmd_fail; + } + /* Invoke the api cmd interface read content*/ + ret = hifc_api_cmd_read_ack(hwdev, para->param.api_rd.dest, + cmd, size, ack, ack_size); + if (ret) { + pr_err("Api send single cmd ack fail!\n"); + goto api_rd_fail; + } + + /* Copy the contents of the ack to the user state */ + if (copy_to_user(para->param.api_rd.ack, ack, ack_size)) { + pr_err("Copy ack to user fail\n"); + ret = -EFAULT; + } +api_rd_fail: +copy_user_cmd_fail: + kfree(ack); +alloc_ack_mem_fail: + kfree(cmd); + return ret; +} + +/** + * dbgtool_knl_api_cmd_write - used for write operations + * @para: the dbgtool parameter + * @g_func_handle_array: global function handle + * Return: 0 - success, negative - failure + */ +long dbgtool_knl_api_cmd_write(struct dbgtool_param *para, + void **g_func_handle_array) +{ + long ret = 0; + u8 *cmd; + u16 size; + u32 pf_id; + void *hwdev; + + pf_id = para->param.api_wr.pf_id; + if (pf_id >= 16) { + pr_err("PF id(0x%x) too big\n", pf_id); + return -EFAULT; + } + + /* obtaining chipif pointer according to pf_id */ + hwdev = g_func_handle_array[pf_id]; + if (!hwdev) { + pr_err("PF id(0x%x) handle null\n", pf_id); + return -EFAULT; + } + + /* alloc cmd memory */ + size = para->param.api_wr.size; + if (para->param.api_wr.size == 0 || size > DBGTOOL_MSG_MAX_SIZE) { + pr_err("Write cmd size invalid or more than 2K\n"); + return -EINVAL; + } + cmd = kzalloc((unsigned long long)size, GFP_KERNEL); + if (!cmd) { + pr_err("Alloc write cmd mem fail\n"); + return -ENOMEM; + } + + /* cmd content copied from user-mode */ + if (copy_from_user(cmd, para->param.api_wr.cmd, (unsigned long)size)) { + pr_err("Copy cmd from user fail\n"); + ret = -EFAULT; + goto 
copy_user_cmd_fail; + } + + /* api cmd interface is invoked to write the content */ + ret = hifc_api_cmd_write_nack(hwdev, para->param.api_wr.dest, + cmd, size); + if (ret) + pr_err("Api send single cmd nack fail\n"); + +copy_user_cmd_fail: + kfree(cmd); + return ret; +} + +void chipif_get_all_pf_dev_info(struct pf_dev_info *dev_info, int card_idx, + void **g_func_handle_array) +{ + u32 func_idx; + struct hifc_hwdev *hwdev; + + if (!dev_info) { + pr_err("Params error!\n"); + return; + } + + /* pf at most 16 */ + for (func_idx = 0; func_idx < 16; func_idx++) { + hwdev = (struct hifc_hwdev *)g_func_handle_array[func_idx]; + + dev_info[func_idx].phy_addr = g_card_phy_addr[card_idx]; + + if (!hwdev) { + dev_info[func_idx].bar0_size = 0; + dev_info[func_idx].bus = 0; + dev_info[func_idx].slot = 0; + dev_info[func_idx].func = 0; + } else { + dev_info[func_idx].bar0_size = + pci_resource_len + (((struct pci_dev *)hwdev->pcidev_hdl), 0); + dev_info[func_idx].bus = + ((struct pci_dev *) + hwdev->pcidev_hdl)->bus->number; + dev_info[func_idx].slot = + PCI_SLOT(((struct pci_dev *)hwdev->pcidev_hdl) + ->devfn); + dev_info[func_idx].func = + PCI_FUNC(((struct pci_dev *)hwdev->pcidev_hdl) + ->devfn); + } + } +} + +/** + * dbgtool_knl_pf_dev_info_get - Obtain the pf sdk_info + * @para: the dbgtool parameter + * @g_func_handle_array: global function handle + * Return: 0 - success, negative - failure + */ +long dbgtool_knl_pf_dev_info_get(struct dbgtool_param *para, + void **g_func_handle_array) +{ + struct pf_dev_info dev_info[16] = { {0} }; + unsigned char *tmp; + int i; + + mutex_lock(&g_addr_lock); + if (!g_card_vir_addr[card_id]) { + g_card_vir_addr[card_id] = + (void *)__get_free_pages(GFP_KERNEL, + DBGTOOL_PAGE_ORDER); + if (!g_card_vir_addr[card_id]) { + pr_err("Alloc dbgtool api chain fail!\n"); + mutex_unlock(&g_addr_lock); + return -EFAULT; + } + + memset(g_card_vir_addr[card_id], 0, + PAGE_SIZE * (1 << DBGTOOL_PAGE_ORDER)); + + g_card_phy_addr[card_id] = + virt_to_phys(g_card_vir_addr[card_id]); + if (!g_card_phy_addr[card_id]) { + pr_err("phy addr for card %d is 0\n", card_id); + free_pages((unsigned long)g_card_vir_addr[card_id], + DBGTOOL_PAGE_ORDER); + g_card_vir_addr[card_id] = NULL; + mutex_unlock(&g_addr_lock); + return -EFAULT; + } + + tmp = g_card_vir_addr[card_id]; + for (i = 0; i < (1 << DBGTOOL_PAGE_ORDER); i++) { + SetPageReserved(virt_to_page(tmp)); + tmp += PAGE_SIZE; + } + } + mutex_unlock(&g_addr_lock); + + chipif_get_all_pf_dev_info(dev_info, card_id, g_func_handle_array); + + /* Copy the dev_info to user mode */ + if (copy_to_user(para->param.dev_info, dev_info, + (unsigned int)sizeof(dev_info))) { + pr_err("Copy dev_info to user fail\n"); + return -EFAULT; + } + + return 0; +} + +/** + * dbgtool_knl_ffm_info_rd - Read ffm information + * @para: the dbgtool parameter + * @dbgtool_info: the dbgtool info + * Return: 0 - success, negative - failure + */ +long dbgtool_knl_ffm_info_rd(struct dbgtool_param *para, + struct dbgtool_k_glb_info *dbgtool_info) +{ + /* Copy the ffm_info to user mode */ + if (copy_to_user(para->param.ffm_rd, dbgtool_info->ffm, + (unsigned int)sizeof(struct ffm_record_info))) { + pr_err("Copy ffm_info to user fail\n"); + return -EFAULT; + } + + return 0; +} + +/** + * dbgtool_knl_ffm_info_clr - Clear FFM information + * @para: unused + * @dbgtool_info: the dbgtool info + */ +void dbgtool_knl_ffm_info_clr(struct dbgtool_param *para, + struct dbgtool_k_glb_info *dbgtool_info) +{ + dbgtool_info->ffm->ffm_num = 0; +} + +/** + * dbgtool_knl_msg_to_up - After 
receiving dbgtool command sends a message to uP + * @para: the dbgtool parameter + * @g_func_handle_array: global function handle + * Return: 0 - success, negative - failure + */ +long dbgtool_knl_msg_to_up(struct dbgtool_param *para, + void **g_func_handle_array) +{ + long ret = 0; + void *buf_in; + void *buf_out; + u16 out_size; + u8 pf_id; + + if (para->param.msg2up.in_size > DBGTOOL_MSG_MAX_SIZE) { + pr_err("User data(%d) more than 2KB\n", + para->param.msg2up.in_size); + return -EFAULT; + } + + pf_id = para->param.msg2up.pf_id; + /* pf at most 16 */ + if (pf_id >= 16) { + pr_err("PF id(0x%x) too big in message to mgmt\n", pf_id); + return -EFAULT; + } + + if (!g_func_handle_array[pf_id]) { + pr_err("PF id(0x%x) handle null in message to mgmt\n", pf_id); + return -EFAULT; + } + + /* alloc buf_in and buf_out memory, apply for 2K */ + buf_in = kzalloc(DBGTOOL_MSG_MAX_SIZE, GFP_KERNEL); + if (!buf_in) { + pr_err("Alloc buf_in mem fail\n"); + return -ENOMEM; + } + + buf_out = kzalloc(DBGTOOL_MSG_MAX_SIZE, 0); + if (!buf_out) { + pr_err("Alloc buf_out mem fail\n"); + ret = -ENOMEM; + goto alloc_buf_out_mem_fail; + } + + /* copy buf_in from the user state */ + if (copy_from_user(buf_in, para->param.msg2up.buf_in, + (unsigned long)para->param.msg2up.in_size)) { + pr_err("Copy buf_in from user fail\n"); + ret = -EFAULT; + goto copy_user_buf_in_fail; + } + + out_size = DBGTOOL_MSG_MAX_SIZE; + /* Invoke the pf2up communication interface */ + ret = hifc_msg_to_mgmt_sync(g_func_handle_array[pf_id], + para->param.msg2up.mod, + para->param.msg2up.cmd, + buf_in, + para->param.msg2up.in_size, + buf_out, + &out_size, + 0); + if (ret) + goto msg_2_up_fail; + + /* Copy the out_size and buf_out content to user mode */ + if (copy_to_user(para->param.msg2up.out_size, &out_size, + (unsigned int)sizeof(out_size))) { + pr_err("Copy out_size to user fail\n"); + ret = -EFAULT; + goto copy_out_size_fail; + } + + if (copy_to_user(para->param.msg2up.buf_out, buf_out, out_size)) { + pr_err("Copy buf_out to user fail\n"); + ret = -EFAULT; + } + +copy_out_size_fail: +msg_2_up_fail: +copy_user_buf_in_fail: + kfree(buf_out); +alloc_buf_out_mem_fail: + kfree(buf_in); + return ret; +} + +long dbgtool_knl_free_mem(int id) +{ + unsigned char *tmp; + int i; + + mutex_lock(&g_addr_lock); + + if (!g_card_vir_addr[id]) { + mutex_unlock(&g_addr_lock); + return 0; + } + + tmp = g_card_vir_addr[id]; + for (i = 0; i < (1 << DBGTOOL_PAGE_ORDER); i++) { + ClearPageReserved(virt_to_page(tmp)); + tmp += PAGE_SIZE; + } + + free_pages((unsigned long)g_card_vir_addr[id], DBGTOOL_PAGE_ORDER); + g_card_vir_addr[id] = NULL; + g_card_phy_addr[id] = 0; + + mutex_unlock(&g_addr_lock); + + return 0; +} + +static int get_card_id_by_name(char *chip_name) +{ + struct card_node *card_info = NULL; + int i; + + for (i = 0; i < MAX_CARD_NUM; i++) { + card_info = (struct card_node *)g_card_node_array[i]; + if (!card_info) + continue; + if (!strncmp(chip_name, card_info->chip_name, IFNAMSIZ)) + break; + } + + if (i == MAX_CARD_NUM) { + pr_err("Can't find this card %s\n", chip_name); + return -EFAULT; + } + + return i; +} + +/*lint -save -e771 -e794*/ + +static long process_dbgtool_cmd(struct dbgtool_param *param, unsigned int cmd, + int idx) +{ + struct dbgtool_k_glb_info *dbgtool_info; + struct card_node *card_info = NULL; + unsigned int real_cmd; + long ret = 0; + + card_id = idx; + card_info = (struct card_node *)g_card_node_array[idx]; + dbgtool_info = (struct dbgtool_k_glb_info *)card_info->dbgtool_info; + + down(&dbgtool_info->dbgtool_sem); + + 
real_cmd = _IOC_NR(cmd); + + switch (real_cmd) { + case DBGTOOL_CMD_API_RD: + ret = dbgtool_knl_api_cmd_read(param, + card_info->func_handle_array); + break; + case DBGTOOL_CMD_API_WR: + ret = dbgtool_knl_api_cmd_write(param, + card_info->func_handle_array); + break; + case DBGTOOL_CMD_FFM_RD: + ret = dbgtool_knl_ffm_info_rd(param, dbgtool_info); + break; + case DBGTOOL_CMD_FFM_CLR: + dbgtool_knl_ffm_info_clr(param, dbgtool_info); + break; + case DBGTOOL_CMD_PF_DEV_INFO_GET: + ret = dbgtool_knl_pf_dev_info_get(param, + card_info->func_handle_array); + break; + case DBGTOOL_CMD_MSG_2_UP: + ret = dbgtool_knl_msg_to_up(param, + card_info->func_handle_array); + break; + case DBGTOOL_CMD_FREE_MEM: + ret = dbgtool_knl_free_mem(idx); + break; + default: + pr_err("Dbgtool cmd(x%x) not support now\n", real_cmd); + ret = -EFAULT; + } + + up(&dbgtool_info->dbgtool_sem); + + return ret; +} + +/** + * dbgtool_knl_unlocked_ioctl - dbgtool ioctl entry + * @pfile: the pointer to file + * @cmd: the command type + * @arg: user space + * Return: 0 - success, negative - failure + */ +long dbgtool_knl_unlocked_ioctl(struct file *pfile, + unsigned int cmd, + unsigned long arg) +{ + struct dbgtool_param param; + int idx; + + (void)memset(¶m, 0, sizeof(param)); + + if (copy_from_user(¶m, (void *)arg, sizeof(param))) { + pr_err("Copy param from user fail\n"); + return -EFAULT; + } + + param.chip_name[IFNAMSIZ - 1] = '\0'; + idx = get_card_id_by_name(param.chip_name); + if (idx < 0) + return -EFAULT; + + return process_dbgtool_cmd(¶m, cmd, idx); +} + +static struct card_node *get_card_node_by_hwdev(const void *handle) +{ + struct card_node *card_info = NULL; + bool flag = false; + int i, j; + + for (i = 0; i < MAX_CARD_NUM; i++) { + card_info = (struct card_node *)g_card_node_array[i]; + if (!card_info) + continue; + + for (j = 0; j < MAX_FUNCTION_NUM; j++) { + if (handle == card_info->func_handle_array[j]) { + flag = true; + break; + } + } + + if (flag) + break; + } + + if (i == MAX_CARD_NUM) { + pr_err("Id(%d) cant find this card\n", i); + return NULL; + } + + return card_info; +} + +/** + * ffm_intr_msg_record - FFM interruption records sent up + * @handle: the function handle + * @buf_in: the pointer to input buffer + * @in_size: input buffer size + * @buf_out: the pointer to outputput buffer + * @out_size: output buffer size + */ +void ffm_intr_msg_record(void *handle, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct dbgtool_k_glb_info *dbgtool_info; + struct ffm_intr_info *intr; + u32 ffm_idx; + struct timex txc; + struct rtc_time rctm; + struct card_node *card_info = NULL; + + card_info = get_card_node_by_hwdev(handle); + if (!card_info) + return; + + dbgtool_info = (struct dbgtool_k_glb_info *)card_info->dbgtool_info; + if (!dbgtool_info) { + pr_err("Dbgtool info is null\n"); + return; + } + + intr = (struct ffm_intr_info *)buf_in; + + if (!dbgtool_info->ffm) + return; + + ffm_idx = dbgtool_info->ffm->ffm_num; + if (ffm_idx < FFM_RECORD_NUM_MAX) { + pr_info("%s: recv intr, ffm_idx: %d\n", __func__, ffm_idx); + + dbgtool_info->ffm->ffm[ffm_idx].node_id = intr->node_id; + dbgtool_info->ffm->ffm[ffm_idx].err_level = intr->err_level; + dbgtool_info->ffm->ffm[ffm_idx].err_type = intr->err_type; + dbgtool_info->ffm->ffm[ffm_idx].err_csr_addr = + intr->err_csr_addr; + dbgtool_info->ffm->ffm[ffm_idx].err_csr_value = + intr->err_csr_value; + + /* Obtain the current UTC time */ + do_gettimeofday(&txc.time); + + /* Calculate the time in date value to tm */ + rtc_time_to_tm((unsigned 
long)txc.time.tv_sec + + 60 * 60 * 8, &rctm); + + /* tm_year starts from 1900; 0->1900, 1->1901, and so on */ + dbgtool_info->ffm->ffm[ffm_idx].year = + (u16)(rctm.tm_year + 1900); + /* tm_mon starts from 0, 0 indicates January, and so on */ + dbgtool_info->ffm->ffm[ffm_idx].mon = (u8)rctm.tm_mon + 1; + dbgtool_info->ffm->ffm[ffm_idx].mday = (u8)rctm.tm_mday; + dbgtool_info->ffm->ffm[ffm_idx].hour = (u8)rctm.tm_hour; + dbgtool_info->ffm->ffm[ffm_idx].min = (u8)rctm.tm_min; + dbgtool_info->ffm->ffm[ffm_idx].sec = (u8)rctm.tm_sec; + + dbgtool_info->ffm->ffm_num++; + } +} + +/*lint -restore*/ + +/*lint -save -e785 -e438*/ +static const struct file_operations dbgtool_file_operations = { + .owner = THIS_MODULE, + .open = dbgtool_knl_open, + .release = dbgtool_knl_release, + .read = dbgtool_knl_read, + .write = dbgtool_knl_write, + .unlocked_ioctl = dbgtool_knl_unlocked_ioctl, + .mmap = hifc_mem_mmap, +}; + +static int dbgtool_create_cdev(void) +{ + struct device *pdevice; + int ret = 0; + + /* alloc device id */ + ret = alloc_chrdev_region(&(dbgtool_dev_id), 0, 1, CHR_DEV_DBGTOOL); + if (ret) { + pr_err("Alloc dbgtool chrdev region fail, ret=0x%x\n", ret); + return ret; + } + + cdev_init(&(dbgtool_chr_dev), &dbgtool_file_operations); + + ret = cdev_add(&(dbgtool_chr_dev), dbgtool_dev_id, 1); + if (ret) { + pr_err("Add dgbtool dev fail, ret=0x%x\n", ret); + goto cdev_add_fail; + } + + /*lint -save -e160*/ + dbgtool_d_class = class_create(THIS_MODULE, CLASS_DBGTOOL); + /*lint -restore*/ + if (IS_ERR(dbgtool_d_class)) { + pr_err("Create dgbtool class fail\n"); + ret = -EFAULT; + goto cls_create_fail; + } + + /* Export device information to user space + * (/sys/class/class name/device name) + */ + pdevice = device_create(dbgtool_d_class, NULL, + dbgtool_dev_id, NULL, CHR_DEV_DBGTOOL); + if (IS_ERR(pdevice)) { + pr_err("Create dgbtool device fail\n"); + ret = -EFAULT; + goto dev_create_fail; + } + + return 0; + +dev_create_fail: + class_destroy(dbgtool_d_class); +cls_create_fail: + cdev_del(&(dbgtool_chr_dev)); +cdev_add_fail: + unregister_chrdev_region(dbgtool_dev_id, 1); + + return ret; +} + +/** + * dbgtool_knl_init - dbgtool character device init + * @hwdev: the pointer to hardware device + * @chip_node: the pointer to card node + * Return: 0 - success, negative - failure + */ +int dbgtool_knl_init(void *vhwdev, void *chip_node) +{ + struct card_node *chip_info = (struct card_node *)chip_node; + struct dbgtool_k_glb_info *dbgtool_info; + struct hifc_hwdev *hwdev = vhwdev; + int ret = 0; + int id; + + if (hifc_func_type(hwdev) == TYPE_VF) + return 0; + + ret = sysfs_create_file(&((struct device *)(hwdev->dev_hdl))->kobj, + &chip_info->dbgtool_attr_file); + if (ret) { + pr_err("Failed to sysfs create file\n"); + return ret; + } + + chip_info->func_handle_array[hifc_global_func_id(hwdev)] = hwdev; + + hifc_comm_recv_mgmt_self_cmd_reg(hwdev, HIFC_SELF_CMD_UP2PF_FFM, + ffm_intr_msg_record); + + if (chip_info->dbgtool_info) { + chip_info->func_num++; + return 0; + } + + dbgtool_info = (struct dbgtool_k_glb_info *) + kzalloc(sizeof(struct dbgtool_k_glb_info), GFP_KERNEL); + if (!dbgtool_info) { + pr_err("Failed to allocate dbgtool_info\n"); + ret = -EFAULT; + goto dbgtool_info_fail; + } + chip_info->dbgtool_info = dbgtool_info; + + /* FFM init */ + dbgtool_info->ffm = (struct ffm_record_info *) + kzalloc(sizeof(struct ffm_record_info), + GFP_KERNEL); + if (!dbgtool_info->ffm) { + pr_err("Failed to allocate cell contexts for a chain\n"); + ret = -EFAULT; + goto dbgtool_info_ffm_fail; + } + + 
sema_init(&dbgtool_info->dbgtool_sem, 1); + + ret = sscanf(chip_info->chip_name, HIFC_CHIP_NAME "%d", &id); + if (ret < 0) { + pr_err("Failed to get hifc id\n"); + goto sscanf_chdev_fail; + } + + g_card_node_array[id] = chip_info; + chip_info->func_num++; + + if (g_dbgtool_init_flag) { + g_dbgtool_ref_cnt++; + /* already initialized */ + return 0; + } + + ret = dbgtool_create_cdev(); + if (ret) + goto alloc_chdev_fail; + + g_dbgtool_init_flag = 1; + g_dbgtool_ref_cnt = 1; + mutex_init(&g_addr_lock); + + return 0; + +alloc_chdev_fail: + g_card_node_array[id] = NULL; +sscanf_chdev_fail: + kfree(dbgtool_info->ffm); +dbgtool_info_ffm_fail: + kfree(dbgtool_info); + dbgtool_info = NULL; + chip_info->dbgtool_info = NULL; +dbgtool_info_fail: + hifc_comm_recv_up_self_cmd_unreg(hwdev, HIFC_SELF_CMD_UP2PF_FFM); + chip_info->func_handle_array[hifc_global_func_id(hwdev)] = NULL; + sysfs_remove_file(&((struct device *)(hwdev->dev_hdl))->kobj, + &chip_info->dbgtool_attr_file); + return ret; +} + +/** + * dbgtool_knl_deinit - dbgtool character device deinit + * @hwdev: the pointer to hardware device + * @chip_node: the pointer to card node + */ +void dbgtool_knl_deinit(void *vhwdev, void *chip_node) +{ + struct dbgtool_k_glb_info *dbgtool_info; + struct card_node *chip_info = (struct card_node *)chip_node; + int id; + int err; + struct hifc_hwdev *hwdev = vhwdev; + + if (hifc_func_type(hwdev) == TYPE_VF) + return; + + hifc_comm_recv_up_self_cmd_unreg(hwdev, HIFC_SELF_CMD_UP2PF_FFM); + + chip_info->func_handle_array[hifc_global_func_id(hwdev)] = NULL; + + sysfs_remove_file(&((struct device *)(hwdev->dev_hdl))->kobj, + &chip_info->dbgtool_attr_file); + + chip_info->func_num--; + if (chip_info->func_num) + return; + + err = sscanf(chip_info->chip_name, HIFC_CHIP_NAME "%d", &id); + if (err < 0) + pr_err("Failed to get hifc id\n"); + + g_card_node_array[id] = NULL; + + dbgtool_info = chip_info->dbgtool_info; + /* FFM deinit */ + kfree(dbgtool_info->ffm); + dbgtool_info->ffm = NULL; + + kfree(dbgtool_info); + chip_info->dbgtool_info = NULL; + + (void)dbgtool_knl_free_mem(id); + + if (g_dbgtool_init_flag) { + if ((--g_dbgtool_ref_cnt)) + return; + } + + if (!dbgtool_d_class) + return; + + device_destroy(dbgtool_d_class, dbgtool_dev_id); + class_destroy(dbgtool_d_class); + dbgtool_d_class = NULL; + + cdev_del(&(dbgtool_chr_dev)); + unregister_chrdev_region(dbgtool_dev_id, 1); + + g_dbgtool_init_flag = 0; +} + +/*lint -restore*/ diff --git a/drivers/scsi/huawei/hifc/hifc_dbgtool_knl.h b/drivers/scsi/huawei/hifc/hifc_dbgtool_knl.h new file mode 100644 index 000000000000..fa8cc4b81b67 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_dbgtool_knl.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef __DBGTOOL_KNL_H__ +#define __DBGTOOL_KNL_H__ + +enum dbg_tool_cmd { + DBGTOOL_CMD_API_RD = 0, + DBGTOOL_CMD_API_WR, + + DBGTOOL_CMD_FFM_RD, + DBGTOOL_CMD_FFM_CLR, + + DBGTOOL_CMD_PF_DEV_INFO_GET, + + DBGTOOL_CMD_MSG_2_UP, + + DBGTOOL_CMD_FREE_MEM, + DBGTOOL_CMD_NUM +}; + +struct api_cmd_rd { + u32 pf_id; + u8 dest; + u8 *cmd; + u16 size; + void *ack; + u16 ack_size; +}; + +struct api_cmd_wr { + u32 pf_id; + u8 dest; + u8 *cmd; + u16 size; +}; + +struct pf_dev_info { + u64 bar0_size; + u8 bus; + u8 slot; + u8 func; + u64 phy_addr; +}; + +/* Interrupt at most records, interrupt will be recorded in the FFM */ +#define FFM_RECORD_NUM_MAX 64 + +struct ffm_intr_tm_info { + u8 node_id; + /* error level of the 
interrupt source */ + u8 err_level; + /* Classification by interrupt source properties */ + u16 err_type; + u32 err_csr_addr; + u32 err_csr_value; + + u8 sec; /* second*/ + u8 min; /* minute */ + u8 hour; /* hour */ + u8 mday; /* day */ + u8 mon; /* month */ + u16 year; /* year */ +}; + +struct ffm_record_info { + u32 ffm_num; + struct ffm_intr_tm_info ffm[FFM_RECORD_NUM_MAX]; +}; + +struct msg_2_up { + u8 pf_id; /* which pf sends messages to the up */ + u8 mod; + u8 cmd; + void *buf_in; + u16 in_size; + void *buf_out; + u16 *out_size; +}; + +struct dbgtool_param { + union { + struct api_cmd_rd api_rd; + struct api_cmd_wr api_wr; + struct pf_dev_info *dev_info; + struct ffm_record_info *ffm_rd; + struct msg_2_up msg2up; + } param; + char chip_name[16]; +}; + +#ifndef MAX_CARD_NUM +#define MAX_CARD_NUM 64 +#endif +#define DBGTOOL_PAGE_ORDER 10 + +int dbgtool_knl_init(void *vhwdev, void *chip_node); +void dbgtool_knl_deinit(void *vhwdev, void *chip_node); +int hifc_mem_mmap(struct file *filp, struct vm_area_struct *vma); +void chipif_get_all_pf_dev_info(struct pf_dev_info *dev_info, int card_id, + void **g_func_handle_array); +long dbgtool_knl_free_mem(int id); + +#endif diff --git a/drivers/scsi/huawei/hifc/hifc_lld.c b/drivers/scsi/huawei/hifc/hifc_lld.c new file mode 100644 index 000000000000..d86bc8a42919 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_lld.c @@ -0,0 +1,895 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/io-mapping.h> +#include <linux/interrupt.h> +#include <linux/inetdevice.h> +#include <net/addrconf.h> +#include <linux/time.h> +#include <linux/timex.h> +#include <linux/rtc.h> +#include <linux/aer.h> +#include <linux/debugfs.h> + +#include "hifc_knl_adp.h" +#include "hifc_hw.h" +#include "hifc_hwif.h" +#include "hifc_api_cmd.h" +#include "hifc_mgmt.h" +#include "hifc_lld.h" +#include "hifc_dbgtool_knl.h" +#include "hifc_tool.h" + +#define HIFC_PCI_CFG_REG_BAR 0 +#define HIFC_PCI_INTR_REG_BAR 2 +#define HIFC_PCI_DB_BAR 4 +#define HIFC_SECOND_BASE 1000 +#define HIFC_SYNC_YEAR_OFFSET 1900 +#define HIFC_SYNC_MONTH_OFFSET 1 + +#define HIFC_DRV_DESC "Huawei(R) Intelligent Network Interface Card Driver" +#define HIFCVF_DRV_DESC "Huawei(R) Intelligent Virtual Function Network Driver" + +MODULE_AUTHOR("Huawei Technologies CO., Ltd"); +MODULE_DESCRIPTION(HIFC_DRV_DESC); +MODULE_VERSION(HIFC_DRV_VERSION); +MODULE_LICENSE("GPL"); + +#define HIFC_EVENT_PROCESS_TIMEOUT 10000 + +#define FIND_BIT(num, n) (((num) & (1UL << (n))) ? 
1 : 0) +#define SET_BIT(num, n) ((num) | (1UL << (n))) +#define CLEAR_BIT(num, n) ((num) & (~(1UL << (n)))) + +#define MAX_CARD_ID 64 +static u64 card_bit_map; +LIST_HEAD(g_hinic_chip_list); + +enum hifc_lld_status { + HIFC_NODE_CHANGE = BIT(0), +}; + +struct hifc_lld_lock { + /* lock for chip list */ + struct mutex lld_mutex; + unsigned long status; + atomic_t dev_ref_cnt; +}; + +struct hifc_lld_lock g_lld_lock; + +#define WAIT_LLD_DEV_HOLD_TIMEOUT (10 * 60 * 1000) /* 10minutes */ +#define WAIT_LLD_DEV_NODE_CHANGED (10 * 60 * 1000) /* 10minutes */ +#define WAIT_LLD_DEV_REF_CNT_EMPTY (2 * 60 * 1000) /* 2minutes */ + +/* node in chip_node will changed, tools or driver can't get node + * during this situation + */ +static void lld_lock_chip_node(void) +{ + u32 loop_cnt; + + mutex_lock(&g_lld_lock.lld_mutex); + + loop_cnt = 0; + while (loop_cnt < WAIT_LLD_DEV_NODE_CHANGED) { + if (!test_and_set_bit(HIFC_NODE_CHANGE, &g_lld_lock.status)) + break; + + loop_cnt++; + + if (loop_cnt % 10000 == 0) + pr_warn("Wait for lld node change complete for %us\n", + loop_cnt / 1000); + + usleep_range(900, 1000); + } + + if (loop_cnt == WAIT_LLD_DEV_NODE_CHANGED) + pr_warn("Wait for lld node change complete timeout when try to get lld lock\n"); + + loop_cnt = 0; + while (loop_cnt < WAIT_LLD_DEV_REF_CNT_EMPTY) { + if (!atomic_read(&g_lld_lock.dev_ref_cnt)) + break; + + loop_cnt++; + + if (loop_cnt % 10000 == 0) + pr_warn("Wait for lld dev unused for %us, reference count: %d\n", + loop_cnt / 1000, + atomic_read(&g_lld_lock.dev_ref_cnt)); + + usleep_range(900, 1000); + } + + if (loop_cnt == WAIT_LLD_DEV_REF_CNT_EMPTY) + pr_warn("Wait for lld dev unused timeout\n"); + + mutex_unlock(&g_lld_lock.lld_mutex); +} + +static void lld_unlock_chip_node(void) +{ + clear_bit(HIFC_NODE_CHANGE, &g_lld_lock.status); +} + +/* When tools or other drivers want to get node of chip_node, use this function + * to prevent node be freed + */ +void lld_dev_hold(void) +{ + u32 loop_cnt = 0; + + /* ensure there have not any chip node in changing */ + mutex_lock(&g_lld_lock.lld_mutex); + + while (loop_cnt < WAIT_LLD_DEV_HOLD_TIMEOUT) { + if (!test_bit(HIFC_NODE_CHANGE, &g_lld_lock.status)) + break; + + loop_cnt++; + + if (loop_cnt % 10000 == 0) + pr_warn("Wait lld node change complete for %us\n", + loop_cnt / 1000); + + usleep_range(900, 1000); + } + + if (loop_cnt == WAIT_LLD_DEV_HOLD_TIMEOUT) + pr_warn("Wait lld node change complete timeout when try to hode lld dev\n"); + + atomic_inc(&g_lld_lock.dev_ref_cnt); + + mutex_unlock(&g_lld_lock.lld_mutex); +} + +void lld_dev_put(void) +{ + atomic_dec(&g_lld_lock.dev_ref_cnt); +} + +static void hifc_lld_lock_init(void) +{ + mutex_init(&g_lld_lock.lld_mutex); + atomic_set(&g_lld_lock.dev_ref_cnt, 0); +} + +extern int hifc_probe(struct hifc_lld_dev *lld_dev, + void **uld_dev, char *uld_dev_name); + +static int attach_uld(struct hifc_pcidev *dev) +{ + void *uld_dev = NULL; + + int err; + + mutex_lock(&dev->pdev_mutex); + + if (dev->init_state < HIFC_INIT_STATE_HWDEV_INITED) { + sdk_err(&dev->pcidev->dev, "SDK init failed, can not attach uld\n"); + err = -EFAULT; + goto out_unlock; + } + + err = hifc_stateful_init(dev->hwdev); + if (err) + goto out_unlock; + + err = hifc_probe(&dev->lld_dev, &uld_dev, dev->uld_dev_name); + if (err || !uld_dev) { + sdk_err(&dev->pcidev->dev, + "Failed to add object for driver to pcie device\n"); + goto probe_failed; + } + + dev->uld_dev = uld_dev; + mutex_unlock(&dev->pdev_mutex); + + sdk_info(&dev->pcidev->dev, + "Attach driver to pcie device succeed\n"); + return 
0; + +probe_failed: + hifc_stateful_deinit(dev->hwdev); +out_unlock: + mutex_unlock(&dev->pdev_mutex); + + return err; +} + +extern void hifc_remove(struct hifc_lld_dev *lld_dev, void *uld_dev); + +static void detach_uld(struct hifc_pcidev *dev) +{ + u32 cnt = 0; + + mutex_lock(&dev->pdev_mutex); + + while (cnt < HIFC_EVENT_PROCESS_TIMEOUT) { + if (!test_and_set_bit(SERVICE_T_FC, &dev->state)) + break; + usleep_range(900, 1000); + cnt++; + } + + hifc_remove(&dev->lld_dev, dev->uld_dev); + dev->uld_dev = NULL; + hifc_stateful_deinit(dev->hwdev); + if (cnt < HIFC_EVENT_PROCESS_TIMEOUT) + clear_bit(SERVICE_T_FC, &dev->state); + + sdk_info(&dev->pcidev->dev, + "Detach driver from pcie device succeed\n"); + mutex_unlock(&dev->pdev_mutex); +} + +static void hifc_sync_time_to_fmw(struct hifc_pcidev *pdev_pri) +{ + struct timeval tv = {0}; + struct rtc_time rt_time = {0}; + u64 tv_msec; + int err; + + do_gettimeofday(&tv); + + tv_msec = tv.tv_sec * HIFC_SECOND_BASE + + tv.tv_usec / HIFC_SECOND_BASE; + err = hifc_sync_time(pdev_pri->hwdev, tv_msec); + if (err) { + sdk_err(&pdev_pri->pcidev->dev, "Synchronize UTC time to firmware failed, errno:%d.\n", + err); + } else { + rtc_time_to_tm(tv.tv_sec, &rt_time); + sdk_info(&pdev_pri->pcidev->dev, "Synchronize UTC time to firmware succeed. UTC time %d-%02d-%02d %02d:%02d:%02d.\n", + rt_time.tm_year + HIFC_SYNC_YEAR_OFFSET, + rt_time.tm_mon + HIFC_SYNC_MONTH_OFFSET, + rt_time.tm_mday, rt_time.tm_hour, + rt_time.tm_min, rt_time.tm_sec); + } +} + +#define MAX_VER_FIELD_LEN 4 +#define MAX_VER_SPLIT_NUM 4 + +struct mctp_hdr { + u16 resp_code; + u16 reason_code; + u32 manufacture_id; + + u8 cmd_rsvd; + u8 major_cmd; + u8 sub_cmd; + u8 spc_field; +}; + +struct mctp_bdf_info { + struct mctp_hdr hdr; /* spc_field: pf index */ + u8 rsvd; + u8 bus; + u8 device; + u8 function; +}; + +static void __mctp_set_hdr(struct mctp_hdr *hdr, + struct hifc_mctp_host_info *mctp_info) +{ + u32 manufacture_id = 0x07DB; + + hdr->cmd_rsvd = 0; + hdr->major_cmd = mctp_info->major_cmd; + hdr->sub_cmd = mctp_info->sub_cmd; + hdr->manufacture_id = cpu_to_be32(manufacture_id); + hdr->resp_code = cpu_to_be16(hdr->resp_code); + hdr->reason_code = cpu_to_be16(hdr->reason_code); +} + +static void __mctp_get_bdf(struct hifc_pcidev *pci_adapter, + struct hifc_mctp_host_info *mctp_info) +{ + struct pci_dev *pdev = pci_adapter->pcidev; + struct mctp_bdf_info *bdf_info = mctp_info->data; + + bdf_info->bus = pdev->bus->number; + bdf_info->device = (u8)(pdev->devfn >> 3); /* 5bits in devfn */ + bdf_info->function = (u8)(pdev->devfn & 0x7); /* 3bits in devfn */ + + memset(&bdf_info->hdr, 0, sizeof(bdf_info->hdr)); + __mctp_set_hdr(&bdf_info->hdr, mctp_info); + bdf_info->hdr.spc_field = + (u8)hifc_global_func_id_hw(pci_adapter->hwdev); + + mctp_info->data_len = sizeof(*bdf_info); +} + +#define MCTP_PUBLIC_SUB_CMD_BDF 0x1 + +static void __mctp_get_host_info(struct hifc_pcidev *dev, + struct hifc_mctp_host_info *mctp_info) +{ +#define COMMAND_UNSUPPORTED 3 + struct mctp_hdr *hdr; + + if (((((u16)mctp_info->major_cmd) << 8) | mctp_info->sub_cmd) == + MCTP_PUBLIC_SUB_CMD_BDF) { + __mctp_get_bdf(dev, mctp_info); + } else { + hdr = mctp_info->data; + hdr->reason_code = COMMAND_UNSUPPORTED; + __mctp_set_hdr(hdr, mctp_info); + mctp_info->data_len = sizeof(*hdr); + } +} + +void *hifc_get_ppf_hwdev_by_pdev(struct pci_dev *pdev) +{ + struct hifc_pcidev *pci_adapter; + struct card_node *chip_node; + struct hifc_pcidev *dev; + + if (!pdev) + return NULL; + + pci_adapter = pci_get_drvdata(pdev); + if 
(!pci_adapter) + return NULL; + + chip_node = pci_adapter->chip_node; + lld_dev_hold(); + list_for_each_entry(dev, &chip_node->func_list, node) { + if (dev->hwdev && hifc_func_type(dev->hwdev) == TYPE_PPF) { + lld_dev_put(); + return dev->hwdev; + } + } + lld_dev_put(); + + return NULL; +} + +void hifc_event(struct hifc_lld_dev *lld_dev, void *uld_dev, + struct hifc_event_info *event); + +void hifc_event_process(void *adapter, struct hifc_event_info *event) +{ + struct hifc_pcidev *dev = adapter; + + if (event->type == HIFC_EVENT_FMW_ACT_NTC) + return hifc_sync_time_to_fmw(dev); + else if (event->type == HIFC_EVENT_MCTP_GET_HOST_INFO) + return __mctp_get_host_info(dev, &event->mctp_info); + + if (test_and_set_bit(SERVICE_T_FC, &dev->state)) { + sdk_warn(&dev->pcidev->dev, "Event: 0x%x can't handler is in detach\n", + event->type); + return; + } + + hifc_event(&dev->lld_dev, dev->uld_dev, event); + clear_bit(SERVICE_T_FC, &dev->state); +} + +static int mapping_bar(struct pci_dev *pdev, struct hifc_pcidev *pci_adapter) +{ + u64 dwqe_addr; + + pci_adapter->cfg_reg_base = pci_ioremap_bar(pdev, HIFC_PCI_CFG_REG_BAR); + if (!pci_adapter->cfg_reg_base) { + sdk_err(&pci_adapter->pcidev->dev, + "Failed to map configuration regs\n"); + return -ENOMEM; + } + + pci_adapter->intr_reg_base = pci_ioremap_bar(pdev, + HIFC_PCI_INTR_REG_BAR); + if (!pci_adapter->intr_reg_base) { + sdk_err(&pci_adapter->pcidev->dev, + "Failed to map interrupt regs\n"); + goto map_intr_bar_err; + } + + pci_adapter->db_base_phy = pci_resource_start(pdev, HIFC_PCI_DB_BAR); + pci_adapter->db_base = ioremap(pci_adapter->db_base_phy, + HIFC_DB_DWQE_SIZE); + if (!pci_adapter->db_base) { + sdk_err(&pci_adapter->pcidev->dev, + "Failed to map doorbell regs\n"); + goto map_db_err; + } + + dwqe_addr = pci_adapter->db_base_phy + HIFC_DB_DWQE_SIZE; + +#if defined(__aarch64__) + /* arm do not support call ioremap_wc() */ + pci_adapter->dwqe_mapping = __ioremap(dwqe_addr, HIFC_DB_DWQE_SIZE, + __pgprot(PROT_DEVICE_nGnRnE)); +#else + pci_adapter->dwqe_mapping = io_mapping_create_wc(dwqe_addr, + HIFC_DB_DWQE_SIZE); + +#endif /* end of "defined(__aarch64__)" */ + if (!pci_adapter->dwqe_mapping) { + sdk_err(&pci_adapter->pcidev->dev, "Failed to io_mapping_create_wc\n"); + goto mapping_dwqe_err; + } + + return 0; + +mapping_dwqe_err: + iounmap(pci_adapter->db_base); + +map_db_err: + iounmap(pci_adapter->intr_reg_base); + +map_intr_bar_err: + iounmap(pci_adapter->cfg_reg_base); + + return -ENOMEM; +} + +static void unmapping_bar(struct hifc_pcidev *pci_adapter) +{ +#if defined(__aarch64__) + iounmap(pci_adapter->dwqe_mapping); +#else + io_mapping_free(pci_adapter->dwqe_mapping); +#endif /* end of "defined(__aarch64__)" */ + + iounmap(pci_adapter->db_base); + iounmap(pci_adapter->intr_reg_base); + iounmap(pci_adapter->cfg_reg_base); +} + +static int alloc_chip_node(struct hifc_pcidev *pci_adapter) +{ + struct card_node *chip_node; + unsigned char i; + unsigned char parent_bus_number = 0; + + if (!pci_is_root_bus(pci_adapter->pcidev->bus)) + parent_bus_number = pci_adapter->pcidev->bus->parent->number; + + if (parent_bus_number != 0) { + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + if (chip_node->dp_bus_num == parent_bus_number) { + pci_adapter->chip_node = chip_node; + return 0; + } + } + } + + for (i = 0; i < MAX_CARD_ID; i++) { + if (!FIND_BIT(card_bit_map, i)) { + card_bit_map = (u64)SET_BIT(card_bit_map, i); + break; + } + } + + if (i == MAX_CARD_ID) { + sdk_err(&pci_adapter->pcidev->dev, + "Failed to alloc card id\n"); + return 
-EFAULT; + } + + chip_node = kzalloc(sizeof(*chip_node), GFP_KERNEL); + if (!chip_node) { + card_bit_map = CLEAR_BIT(card_bit_map, i); + sdk_err(&pci_adapter->pcidev->dev, + "Failed to alloc chip node\n"); + return -ENOMEM; + } + + chip_node->dbgtool_attr_file.name = kzalloc(IFNAMSIZ, GFP_KERNEL); + if (!(chip_node->dbgtool_attr_file.name)) { + kfree(chip_node); + card_bit_map = CLEAR_BIT(card_bit_map, i); + sdk_err(&pci_adapter->pcidev->dev, + "Failed to alloc dbgtool attr file name\n"); + return -ENOMEM; + } + + /* parent bus number */ + chip_node->dp_bus_num = parent_bus_number; + + snprintf(chip_node->chip_name, IFNAMSIZ, "%s%d", HIFC_CHIP_NAME, i); + snprintf((char *)chip_node->dbgtool_attr_file.name, + IFNAMSIZ, "%s%d", HIFC_CHIP_NAME, i); + sdk_info(&pci_adapter->pcidev->dev, + "Add new chip %s to global list succeed\n", + chip_node->chip_name); + + list_add_tail(&chip_node->node, &g_hinic_chip_list); + + INIT_LIST_HEAD(&chip_node->func_list); + pci_adapter->chip_node = chip_node; + + mutex_init(&chip_node->sfp_mutex); + + return 0; +} + +static void free_chip_node(struct hifc_pcidev *pci_adapter) +{ + struct card_node *chip_node = pci_adapter->chip_node; + u32 id; + int err; + + if (list_empty(&chip_node->func_list)) { + list_del(&chip_node->node); + sdk_info(&pci_adapter->pcidev->dev, + "Delete chip %s from global list succeed\n", + chip_node->chip_name); + err = sscanf(chip_node->chip_name, HIFC_CHIP_NAME "%u", &id); + if (err < 0) + sdk_err(&pci_adapter->pcidev->dev, "Failed to get hifc id\n"); + + card_bit_map = CLEAR_BIT(card_bit_map, id); + + kfree(chip_node->dbgtool_attr_file.name); + kfree(chip_node); + } +} + +static int config_pci_dma_mask(struct pci_dev *pdev) +{ + int err; + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + sdk_warn(&pdev->dev, "Couldn't set 64-bit DMA mask\n"); + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + sdk_err(&pdev->dev, "Failed to set DMA mask\n"); + return err; + } + } + + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + sdk_warn(&pdev->dev, + "Couldn't set 64-bit coherent DMA mask\n"); + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + sdk_err(&pdev->dev, + "Failed to set coherent DMA mask\n"); + return err; + } + } + + return 0; +} + +static int hifc_pci_init(struct pci_dev *pdev) +{ + struct hifc_pcidev *pci_adapter = NULL; + int err; + + pci_adapter = kzalloc(sizeof(*pci_adapter), GFP_KERNEL); + if (!pci_adapter) { + sdk_err(&pdev->dev, + "Failed to alloc pci device adapter\n"); + return -ENOMEM; + } + pci_adapter->pcidev = pdev; + mutex_init(&pci_adapter->pdev_mutex); + + pci_set_drvdata(pdev, pci_adapter); + + err = pci_enable_device(pdev); + if (err) { + sdk_err(&pdev->dev, "Failed to enable PCI device\n"); + goto pci_enable_err; + } + + err = pci_request_regions(pdev, HIFC_DRV_NAME); + if (err) { + sdk_err(&pdev->dev, "Failed to request regions\n"); + goto pci_regions_err; + } + + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + + err = config_pci_dma_mask(pdev); + if (err) + goto dma_mask_err; + + return 0; + +dma_mask_err: + pci_clear_master(pdev); + pci_release_regions(pdev); + +pci_regions_err: + pci_disable_device(pdev); + +pci_enable_err: + pci_set_drvdata(pdev, NULL); + kfree(pci_adapter); + + return err; +} + +static void hifc_pci_deinit(struct pci_dev *pdev) +{ + struct hifc_pcidev *pci_adapter = pci_get_drvdata(pdev); + + pci_clear_master(pdev); + pci_release_regions(pdev); + pci_disable_pcie_error_reporting(pdev); + 
pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + kfree(pci_adapter); +} + +static int hifc_func_init(struct pci_dev *pdev, + struct hifc_pcidev *pci_adapter) +{ + struct hifc_init_para init_para; + + int err; + + init_para.adapter_hdl = pci_adapter; + init_para.pcidev_hdl = pdev; + init_para.dev_hdl = &pdev->dev; + init_para.cfg_reg_base = pci_adapter->cfg_reg_base; + init_para.intr_reg_base = pci_adapter->intr_reg_base; + init_para.db_base = pci_adapter->db_base; + init_para.db_base_phy = pci_adapter->db_base_phy; + init_para.dwqe_mapping = pci_adapter->dwqe_mapping; + init_para.hwdev = &pci_adapter->hwdev; + init_para.chip_node = pci_adapter->chip_node; + init_para.ppf_hwdev = hifc_get_ppf_hwdev_by_pdev(pdev); + err = hifc_init_hwdev(&init_para); + if (err) { + pci_adapter->hwdev = NULL; + sdk_err(&pdev->dev, "Failed to initialize hardware device\n"); + return -EFAULT; + } + + pci_adapter->init_state = HIFC_INIT_STATE_HWDEV_INITED; + + pci_adapter->lld_dev.pdev = pdev; + pci_adapter->lld_dev.hwdev = pci_adapter->hwdev; + + hifc_event_register(pci_adapter->hwdev, pci_adapter, + hifc_event_process); + + hifc_sync_time_to_fmw(pci_adapter); + + lld_lock_chip_node(); + err = dbgtool_knl_init(pci_adapter->hwdev, pci_adapter->chip_node); + if (err) { + lld_unlock_chip_node(); + sdk_err(&pdev->dev, "Failed to initialize dbgtool\n"); + hifc_event_unregister(pci_adapter->hwdev); + return err; + } + lld_unlock_chip_node(); + pci_adapter->init_state = HIFC_INIT_STATE_DBGTOOL_INITED; + + attach_uld(pci_adapter); + + sdk_info(&pdev->dev, "Pcie device probed\n"); + pci_adapter->init_state = HIFC_INIT_STATE_ALL_INITED; + + return 0; +} + +static void hifc_func_deinit(struct pci_dev *pdev) +{ + struct hifc_pcidev *pci_adapter = pci_get_drvdata(pdev); + + /* When function deinit, disable mgmt initiative report events firstly, + * then flush mgmt work-queue. 
+ */ + if (pci_adapter->init_state >= HIFC_INIT_STATE_ALL_INITED) + detach_uld(pci_adapter); + + hifc_disable_mgmt_msg_report(pci_adapter->hwdev); + if (pci_adapter->init_state >= HIFC_INIT_STATE_HW_PART_INITED) + hifc_flush_mgmt_workq(pci_adapter->hwdev); + + hifc_set_func_deinit_flag(pci_adapter->hwdev); + + if (pci_adapter->init_state >= HIFC_INIT_STATE_DBGTOOL_INITED) { + lld_lock_chip_node(); + dbgtool_knl_deinit(pci_adapter->hwdev, pci_adapter->chip_node); + lld_unlock_chip_node(); + hifc_event_unregister(pci_adapter->hwdev); + } + + if (pci_adapter->init_state >= HIFC_INIT_STATE_HW_IF_INITED) { + /*Remove the current node from node-list first, + * then it's safe to free hwdev + */ + lld_lock_chip_node(); + list_del(&pci_adapter->node); + lld_unlock_chip_node(); + + hifc_free_hwdev(pci_adapter->hwdev); + } +} + +static void remove_func(struct hifc_pcidev *pci_adapter) +{ + struct pci_dev *pdev = pci_adapter->pcidev; + + switch (pci_adapter->init_state) { + case HIFC_INIT_STATE_ALL_INITED: + /*lint -fallthrough*/ + + case HIFC_INIT_STATE_DBGTOOL_INITED: + case HIFC_INIT_STATE_HWDEV_INITED: + case HIFC_INIT_STATE_HW_PART_INITED: + case HIFC_INIT_STATE_HW_IF_INITED: + case HIFC_INIT_STATE_PCI_INITED: + set_bit(HIFC_FUNC_IN_REMOVE, &pci_adapter->flag); + + if (pci_adapter->init_state >= HIFC_INIT_STATE_HW_IF_INITED) + hifc_func_deinit(pdev); + + lld_lock_chip_node(); + if (pci_adapter->init_state < HIFC_INIT_STATE_HW_IF_INITED) + list_del(&pci_adapter->node); + nictool_k_uninit(); + free_chip_node(pci_adapter); + lld_unlock_chip_node(); + unmapping_bar(pci_adapter); + hifc_pci_deinit(pdev); + + /*lint -fallthrough*/ + break; + + default: + break; + } +} + +static void hifc_hwdev_remove(struct pci_dev *pdev) +{ + struct hifc_pcidev *pci_adapter = pci_get_drvdata(pdev); + + if (!pci_adapter) + return; + + sdk_info(&pdev->dev, "Pcie device remove begin\n"); + + if (pci_adapter->init_state >= HIFC_INIT_STATE_HW_IF_INITED) + hifc_detect_hw_present(pci_adapter->hwdev); + + remove_func(pci_adapter); + + sdk_info(&pdev->dev, "Pcie device removed\n"); +} + +static int hifc_hwdev_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct hifc_pcidev *pci_adapter; + int err; + + sdk_info(&pdev->dev, "Pcie device probe begin\n"); + + err = hifc_pci_init(pdev); + if (err) + return err; + + pci_adapter = pci_get_drvdata(pdev); + clear_bit(HIFC_FUNC_PRB_ERR, &pci_adapter->flag); + clear_bit(HIFC_FUNC_PRB_DELAY, &pci_adapter->flag); + err = mapping_bar(pdev, pci_adapter); + if (err) { + sdk_err(&pdev->dev, "Failed to map bar\n"); + goto map_bar_failed; + } + + pci_adapter->id = *id; + + /* if chip information of pcie function exist, + * add the function into chip + */ + lld_lock_chip_node(); + err = alloc_chip_node(pci_adapter); + if (err) { + sdk_err(&pdev->dev, + "Failed to add new chip node to global list\n"); + goto alloc_chip_node_fail; + } + err = nictool_k_init(); + if (err) { + sdk_warn(&pdev->dev, "Failed to init nictool"); + goto init_nictool_err; + } + list_add_tail(&pci_adapter->node, &pci_adapter->chip_node->func_list); + + lld_unlock_chip_node(); + + pci_adapter->init_state = HIFC_INIT_STATE_PCI_INITED; + + err = hifc_func_init(pdev, pci_adapter); + if (err) + goto func_init_err; + + return 0; + +func_init_err: + if (!test_bit(HIFC_FUNC_PRB_DELAY, &pci_adapter->flag)) + set_bit(HIFC_FUNC_PRB_ERR, &pci_adapter->flag); + return 0; +init_nictool_err: + free_chip_node(pci_adapter); +alloc_chip_node_fail: + lld_unlock_chip_node(); + unmapping_bar(pci_adapter); + +map_bar_failed: 
+ hifc_pci_deinit(pdev); + + sdk_err(&pdev->dev, "Pcie device probe failed\n"); + return err; +} + +#define PCI_VENDOR_ID_HUAWEI 0x19e5 +#define HIFC_DEV_ID_1822_8G 0x0212 +#define HIFC_DEV_ID_1822_16G 0x0203 +#define HIFC_DEV_ID_1822_32G 0x0202 + +/*lint -save -e133 -e10*/ +static const struct pci_device_id hifc_pci_table[] = { + {PCI_VDEVICE(HUAWEI, HIFC_DEV_ID_1822_8G), 0}, + {PCI_VDEVICE(HUAWEI, HIFC_DEV_ID_1822_16G), 0}, + {PCI_VDEVICE(HUAWEI, HIFC_DEV_ID_1822_32G), 0}, + {0, 0} +}; + +/*lint -restore*/ +MODULE_DEVICE_TABLE(pci, hifc_pci_table); + +static void hifc_shutdown(struct pci_dev *pdev) +{ + sdk_err(&pdev->dev, "Shutdown device\n"); + + pci_disable_device(pdev); +} + +static struct pci_driver hifc_driver = { + .name = HIFC_DRV_NAME, + .id_table = hifc_pci_table, + .probe = hifc_hwdev_probe, + .remove = hifc_hwdev_remove, + .shutdown = hifc_shutdown, +}; + +extern int hifc_init_module(void); +extern void hifc_exit_module(void); + +static int __init hifc_lld_init(void) +{ + pr_info("%s - version %s\n", HIFC_DRV_DESC, HIFC_DRV_VERSION); + + hifc_lld_lock_init(); + + hifc_init_module(); + + return pci_register_driver(&hifc_driver); +} + +static void __exit hifc_lld_exit(void) +{ + pci_unregister_driver(&hifc_driver); + hifc_exit_module(); +} + +module_init(hifc_lld_init); +module_exit(hifc_lld_exit); diff --git a/drivers/scsi/huawei/hifc/hifc_lld.h b/drivers/scsi/huawei/hifc/hifc_lld.h new file mode 100644 index 000000000000..2e063b36f202 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_lld.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef HIFC_LLD_H_ +#define HIFC_LLD_H_ +#include "unf_common.h" +#define HIFC_PCI_VENDOR_ID (0x19e5) +#define HIFC_DRV_NAME "hifc_sdk" +#define HIFC_CHIP_NAME "hifc" +#define HIFC_DRV_VERSION UNF_FC_VERSION + +struct hifc_lld_dev { + struct pci_dev *pdev; + void *hwdev; +}; + +extern struct list_head g_hinic_chip_list; + +/* Structure pcidev private*/ +struct hifc_pcidev { + struct pci_dev *pcidev; + void *hwdev; + struct card_node *chip_node; + struct hifc_lld_dev lld_dev; + /* Record the service object address, + * such as hifc_dev and toe_dev, fc_dev + */ + void *uld_dev; + /* Record the service object name */ + char uld_dev_name[IFNAMSIZ]; + /* It is a the global variable for driver to manage + * all function device linked list + */ + struct list_head node; + + void __iomem *cfg_reg_base; + void __iomem *intr_reg_base; + u64 db_base_phy; + void __iomem *db_base; + +#if defined(__aarch64__) + void __iomem *dwqe_mapping; +#else + struct io_mapping *dwqe_mapping; +#endif + /* lock for attach/detach uld */ + struct mutex pdev_mutex; + + u32 init_state; + /* setted when uld driver processing event */ + unsigned long state; + struct pci_device_id id; + + unsigned long flag; +}; + +enum { + HIFC_FUNC_IN_REMOVE = BIT(0), + HIFC_FUNC_PRB_ERR = BIT(1), + HIFC_FUNC_PRB_DELAY = BIT(2), +}; + +enum hifc_init_state { + HIFC_INIT_STATE_NONE, + HIFC_INIT_STATE_PCI_INITED, + HIFC_INIT_STATE_HW_IF_INITED, + HIFC_INIT_STATE_HW_PART_INITED, + HIFC_INIT_STATE_HWDEV_INITED, + HIFC_INIT_STATE_DBGTOOL_INITED, + HIFC_INIT_STATE_ALL_INITED, +}; + +void lld_dev_put(void); +void lld_dev_hold(void); + +#endif diff --git a/drivers/scsi/huawei/hifc/hifc_portmng.c b/drivers/scsi/huawei/hifc/hifc_portmng.c new file mode 100644 index 000000000000..34bc67552265 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_portmng.c @@ -0,0 +1,1273 @@ +// 
SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#include "hifc_module.h" +#include "hifc_utils.h" +#include "hifc_hba.h" +#include "hifc_chipitf.h" +#include "hifc_portmng.h" + +struct hifc_port_diag_op_s hifc_diag_op[] = { + { UNF_PORT_DIAG_PORT_DETAIL, hifc_show_fc_port_detail }, + { UNF_PORT_DIAG_RD_WR_REG, hifc_rw_reg }, + { UNF_PORT_DIAG_BUTT, NULL } +}; + +char *wqe_type[HIFC_MAX_COUNTER_TYPE] = { + "TASK_TYPE_EMPTY", + "HIFC_SEND_IWRITE", + "HIFC_SEND_IREAD", + "HIFC_RECV_IRESP", + /* obsoleted */ + "HIFC_RECV_TCMND", + /* FCP Read IO Control Command. */ + "HIFC_SEND_TREAD", + /* FCP Write IO Control Command (XFER_RDY). */ + "HIFC_SEND_TWRITE", + /* Target Mode send FCP_RESP of Read/Write */ + "HIFC_SEND_TRESP", + /* Status for FCP_TREAD/FCP_TWRITE/FCP_TRESP */ + "HIFC_RECV_TSTS", + "HIFC_SEND_ABTS", + "HIFC_SEND_IELS", + "HIFC_SEND_ITMF", + "HIFC_SEND_CLEAN_UP", + "HIFC_SEND_CLEAN_UP_ALL", + /* Receive unsolicited data */ + "HIFC_RECV_UNSOLICITED", + "HIFC_RECV_ERR_WARN", + "HIFC_RECV_SESS_EN", + "HIFC_SEND_SESS_DIS", + "HIFC_SEND_SESS_DEL", + "HIFC_SEND_CQE_AVAILABLE", + /* Receive FCP_CMND From Remote Port and Transfer to driver. 20 */ + "HIFC_RECV_TCMND", + /* Receive ELS From Remote Port and Transfer to driver. */ + "HIFC_RECV_ELS_CMD", + /* Receive ELS From Remote Port and Transfer to driver. */ + "HIFC_RECV_ABTS_CMD", + /* Receive immidiate data. */ + "HIFC_RECV_IMMIDIATE", + /* + * ESL response. PLOGI_ACC, PRLI_ACC will carry the parent context + * parameter indication. + */ + "HIFC_SEND_ELS_RSP", + /* Status for ELS. */ + "HIFC_RECV_ELS_RSP_STS", + /* ABTS response with abort flag. */ + "HIFC_SEND_ABTS_RSP", + /* Status for ABTS. */ + "HIFC_RECV_ABTS_RSP_STS", + /* Abort Command */ + "HIFC_SEND_ABORT", + /* Status for ABORT. */ + "HIFC_RECV_ABORT_STS", + + "HIFC_SEND_ELS", + "HIFC_RECV_ELS_RSP", + /* GS request Command */ + "HIFC_SEND_GS", + /* GS response. */ + "HIFC_RECV_GS_RSP", + /* Status for offload req. */ + "HIFC_RECV_SESS_EN_STS", + /* Status for session disable. */ + "HIFC_RECV_SESS_DIS_STS", + /* Status for session delete. */ + "HIFC_RECV_SESS_DEL_STS", + /* Status for ABORT. 
*/ + "HIFC_RECV_ABTS_RSP", + /* Buffer Clear */ + "HIFC_SEND_BUFFER_CLEAR", + /* Status for Buffer Clear */ + "HIFC_RECV_BUFFER_CLEAR_STS", + /* Flush Sq 40 */ + "HIFC_SEND_FLUSH_SQ", + /* Status for FLUSH_SQ */ + "HIFC_RECV_FLUSH_SQ_STS", + /* Reset session SQE type */ + "HIFC_SEND_SESS_RESET", + /* Reset session SCQE type */ + "HIFC_RECV_SESS_RESET_STS", + "HIFC_RECV_CQE_AVAILABLE_STS", + "HIFC_SEND_DUMP_EXCH", + "HIFC_SEND_INIT_SRQC", + "HIFC_SEND_CLEAR_SRQ", + "HIFC_RECV_CLEAR_SRQ_STS", + "HIFC_SEND_INIT_SCQC", + "HIFC_SEND_DEL_SCQC", + "HIFC_SEND_TMF_RESP", + "HIFC_SEND_DEL_SRQC", + "HIFC_RECV_IMMI_CONTINUE", + "HIFC_RECV_ITMF_RESP", + "HIFC_RECV_MARKER_STS", + "HIFC_SEND_TACK", + "HIFC_SEND_AEQERR", + "HIFC_RECV_ABTS_MARKER_STS" +}; + +char *scq_err_type[HIFC_MAX_COUNTER_TYPE] = { + "HIFC_CQE_COMPLETED", + "HIFC_SESS_HT_INSERT_FAIL", + "HIFC_SESS_HT_INSERT_DUPLICATE", + "HIFC_SESS_HT_BIT_SET_FAIL", + "HIFC_SESS_HT_DELETE_FAIL", + + "HIFC_CQE_BUFFER_CLEAR_IO_COMPLETED", + "HIFC_CQE_SESSION_ONLY_CLEAR_IO_COMPLETED", + "HIFC_CQE_SESSION_RST_CLEAR_IO_COMPLETED", + "HIFC_CQE_TMF_RSP_IO_COMPLETED", + "HIFC_CQE_TMF_IO_COMPLETED", + "HIFC_CQE_DRV_ABORT_IO_COMPLETED", + "HIFC_CQE_DRV_ABORT_IO_IN_RSP_COMPLETED", + "HIFC_CQE_DRV_ABORT_IO_IN_CMD_COMPLETED", + "HIFC_CQE_WQE_FLUSH_IO_COMPLETED", + + "HIFC_ERROR_CODE_DATA_DIFX_FAILED", + "HIFC_ERROR_CODE_DATA_TASK_TYPE_INCORRECT", + "HIFC_ERROR_CODE_DATA_OOO_RO", + "HIFC_ERROR_CODE_DATA_EXCEEDS_DATA2TRNS", + + "HIFC_ERROR_CODE_FCP_RSP_INVALID_LENGTH_FIELD", + "HIFC_ERROR_CODE_FCP_CONF_NOT_SUPPORTED", + "HIFC_ERROR_CODE_FCP_RSP_OPENED_SEQ", + + "HIFC_ERROR_CODE_XFER_INVALID_PAYLOAD_SIZE", + "HIFC_ERROR_CODE_XFER_PEND_XFER_SET", + "HIFC_ERROR_CODE_XFER_OOO_RO", + "HIFC_ERROR_CODE_XFER_NULL_BURST_LEN", + + "HIFC_ERROR_CODE_REC_TIMER_EXPIRE", + "HIFC_ERROR_CODE_E_D_TIMER_EXPIRE", + "HIFC_ERROR_CODE_ABORT_TIMER_EXPIRE", + "HIFC_ERROR_CODE_ABORT_MAGIC_NUM_NOT_MATCH", + "HIFC_IMMI_CMDPKT_SETUP_FAIL", + "HIFC_ERROR_CODE_DATA_SEQ_ID_NOT_EQUAL", + + "HIFC_ELS_GS_RSP_EXCH_CHECK_FAIL", + "HIFC_CQE_ELS_GS_SRQE_GET_FAIL", + "HIFC_CQE_DATA_DMA_REQ_FAIL", + "HIFC_CQE_SESSION_CLOSED", + "HIFC_SCQ_IS_FULL", + "HIFC_SRQ_IS_FULL", + "HIFC_DUCHILDCTX_SETUP_FAIL", + "HIFC_ERROR_INVALID_TXMFS", + "HIFC_OFFLOAD_LACKOF_SCQE_FAIL", + "HIFC_INVALID_TASK_ID", + "HIFC_INVALID_PKT_LEN", + "HIFC_CQE_ELS_GS_REQ_CLR_IO_COMPLETED", + "HIFC_CQE_ELS_RSP_CLR_IO_COMPLETED", + "HIFC_CODE_RESID_UNDER_ERR" +}; + +char *com_up_err_event_type[HIFC_MAX_COUNTER_TYPE] = { + "HIFC_EVENT_HEART_LOST", +}; + +char *aeq_err_type[HIFC_MAX_COUNTER_TYPE] = { + /* que_err_code */ + "HIFC_SCQ_IS_FULL_ERR", + "HIFC_SRQ_IS_FULL_ERR", + /* wqe_fatal_err */ + "HIFC_SQE_CHILD_SETUP_WQE_MSN_ERR", + "HIFC_SQE_CHILD_SETUP_WQE_GPA_ERR", + "HIFC_CMDPKT_CHILD_SETUP_INVALID_WQE_ERR_1", + "HIFC_CMDPKT_CHILD_SETUP_INVALID_WQE_ERR_2", + "HIFC_CLEAEQ_WQE_ERR", + "HIFC_WQEFETCH_WQE_MSN_ERR", + "HIFC_WQEFETCH_QUINFO_ERR", + + /* ctx_fatal_err */ + "HIFC_SCQE_ERR_BIT_ERR", + "HIFC_UPDMA_ADDR_REQ_SRQ_ERR", + "HIFC_SOLICHILDDMA_ADDR_REQ_ERR", + "HIFC_UNSOLICHILDDMA_ADDR_REQ_ERR", + "HIFC_SQE_CHILD_SETUP_QINFO_ERR_1", + "HIFC_SQE_CHILD_SETUP_QINFO_ERR_2", + "HIFC_CMDPKT_CHILD_SETUP_QINFO_ERR_1", + "HIFC_CMDPKT_CHILD_SETUP_QINFO_ERR_2", + "HIFC_CMDPKT_CHILD_SETUP_PMSN_ERR", + "HIFC_CLEAEQ_CTX_ERR", + "HIFC_WQEFETCH_CTX_ERR", + "HIFC_FLUSH_QPC_ERR_LQP", + "HIFC_FLUSH_QPC_ERR_SMF", + "HIFC_PREFETCH_QPC_ERR_1", + "HIFC_PREFETCH_QPC_ERR_2", + "HIFC_PREFETCH_QPC_ERR_3", + "HIFC_PREFETCH_QPC_ERR_4", + "HIFC_PREFETCH_QPC_ERR_5", + 
"HIFC_PREFETCH_QPC_ERR_6", + "HIFC_PREFETCH_QPC_ERR_7", + "HIFC_PREFETCH_QPC_ERR_8", + "HIFC_PREFETCH_QPC_ERR_9", + "HIFC_PREFETCH_QPC_ERR_10", + "HIFC_PREFETCH_QPC_ERR_11", + "HIFC_PREFETCH_QPC_ERR_DEFAULT", + "HIFC_CHILDHASH_INSERT_SW_ERR", + "HIFC_CHILDHASH_LOOKUP_SW_ERR", + "HIFC_CHILDHASH_DEL_SW_ERR", + "HIFC_FLOWHASH_INSERT_SW_ERR", + "HIFC_FLOWHASH_LOOKUP_SW_ERR", + "HIFC_FLOWHASH_DEL_SW_ERR", +}; + +char *err_event_type[HIFC_MAX_COUNTER_TYPE] = { + /* ERR type 0 Err value */ + "HIFC_DRV_2_UP_PARA_ERR", + /* ERR type 1 Err value */ + "HIFC_SFP_SPEED_ERR", + /* ERR type 2 Err value */ + "HIFC_32GPUB_UA_RXESCH_FIFO_OF", + "HIFC_32GPUB_UA_RXESCH_FIFO_UCERR", + + /* ERR type 3 Err value */ + "HIFC_32G_UA_UATX_LEN_ABN", + "HIFC_32G_UA_RXAFIFO_OF", + "HIFC_32G_UA_TXAFIFO_OF", + "HIFC_32G_UA_RXAFIFO_UCERR", + "HIFC_32G_UA_TXAFIFO_UCERR", + + /* ERR type 4 Err value */ + "HIFC_32G_MAC_RX_BBC_FATAL", + "HIFC_32G_MAC_TX_BBC_FATAL", + "HIFC_32G_MAC_TXFIFO_UF", + "HIFC_32G_MAC_PCS_TXFIFO_UF", + "HIFC_32G_MAC_RXBBC_CRDT_TO", + "HIFC_32G_MAC_PCS_RXAFIFO_OF", + "HIFC_32G_MAC_PCS_TXFIFO_OF", + "HIFC_32G_MAC_FC2P_RXFIFO_OF", + "HIFC_32G_MAC_FC2P_TXFIFO_OF", + "HIFC_32G_MAC_FC2P_CAFIFO_OF", + "HIFC_32G_MAC_PCS_RXRSFECM_UCEER", + "HIFC_32G_MAC_PCS_RXAFIFO_UCEER", + "HIFC_32G_MAC_PCS_TXFIFO_UCEER", + "HIFC_32G_MAC_FC2P_RXFIFO_UCEER", + "HIFC_32G_MAC_FC2P_TXFIFO_UCEER", + + /* ERR type 5 Err value */ + "HIFC_NON32G_DFX_FC1_DFX_BF_FIFO", + "HIFC_NON32G_DFX_FC1_DFX_BP_FIFO", + "HIFC_NON32G_DFX_FC1_DFX_RX_AFIFO_ERR", + "HIFC_NON32G_DFX_FC1_DFX_TX_AFIFO_ERR", + "HIFC_NON32G_DFX_FC1_DFX_DIRQ_RXBUF_FIFO1", + "HIFC_NON32G_DFX_FC1_DFX_DIRQ_RXBBC_TO", + "HIFC_NON32G_DFX_FC1_DFX_DIRQ_TXDAT_FIFO", + "HIFC_NON32G_DFX_FC1_DFX_DIRQ_TXCMD_FIFO", + "HIFC_NON32G_DFX_FC1_ERR_R_RDY", + + /* ERR type 6 Err value */ + "HIFC_NON32G_MAC_FC1_FAIRNESS_ERROR", +}; + +unsigned int hifc_set_port_state(void *v_hba, void *v_para_in) +{ + unsigned int ret = UNF_RETURN_ERROR; + enum unf_port_config_state_e port_state = UNF_PORT_CONFIG_STATE_START; + + HIFC_CHECK(INVALID_VALUE32, NULL != v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, NULL != v_para_in, return UNF_RETURN_ERROR); + + port_state = *((enum unf_port_config_state_e *)v_para_in); + switch (port_state) { + case UNF_PORT_CONFIG_STATE_RESET: + ret = (unsigned int)hifc_port_reset(v_hba); + break; + default: + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Cannot set port_state(0x%x)", port_state); + break; + } + + return ret; +} + +unsigned int hifc_set_port_speed(void *v_hba, void *v_para_in) +{ + unsigned long flags = 0; + unsigned int port_speed = 0; + struct hifc_hba_s *hba = v_hba; + + HIFC_CHECK(INVALID_VALUE32, NULL != hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, NULL != v_para_in, return UNF_RETURN_ERROR); + port_speed = *((unsigned int *)v_para_in); + + if (port_speed > hba->max_support_speed) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Speed set(0x%x) exceed max speed(0x%x)", + port_speed, + hba->max_support_speed); + return UNF_RETURN_ERROR; + } + + if ((port_speed >= HIFC_SPEED_16G) && + (hba->port_topo_cfg == UNF_TOP_LOOP_MASK)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Cannot set speed(0x%x) in LOOP mode, check it", + port_speed); + + return UNF_RETURN_ERROR; + } + + spin_lock_irqsave(&hba->hba_lock, flags); + hba->port_speed_cfg = port_speed; + spin_unlock_irqrestore(&hba->hba_lock, flags); + + if (hifc_port_reset(hba) != RETURN_OK) { + 
UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]HIFC port(0x%x) can't reset HBA", + hba->port_cfg.port_id); + + return UNF_RETURN_ERROR; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_INFO, + "[info]HIFC port(0x%x) set port speed finished, configured speed: 0x%x", + hba->port_cfg.port_id, port_speed); + + return RETURN_OK; +} + +unsigned int hifc_set_max_support_speed(void *v_hba, void *para_in) +{ + unsigned long flags = 0; + unsigned char max_support_speed = 0; + struct hifc_hba_s *hba = v_hba; + + HIFC_CHECK(INVALID_VALUE32, NULL != hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, NULL != para_in, return UNF_RETURN_ERROR); + max_support_speed = *((unsigned char *)para_in); + + spin_lock_irqsave(&hba->hba_lock, flags); + hba->max_support_speed = max_support_speed; + spin_unlock_irqrestore(&hba->hba_lock, flags); + + if (hifc_port_reset(hba) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]HIFC port(0x%x) can't reset HBA", + hba->port_cfg.port_id); + return UNF_RETURN_ERROR; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]HIFC set port(0x%x) speed finished, configured max support speed: 0x%x", + hba->port_cfg.port_id, max_support_speed); + + return RETURN_OK; +} + +unsigned int hifc_set_loop_role(void *v_hba, void *para_in) +{ + unsigned long flags = 0; + unsigned int loop_role = 0; + struct hifc_hba_s *hba = v_hba; + + HIFC_CHECK(INVALID_VALUE32, NULL != hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, NULL != para_in, return UNF_RETURN_ERROR); + + loop_role = *((unsigned int *)para_in); + + spin_lock_irqsave(&hba->hba_lock, flags); + hba->port_loop_role = loop_role; + spin_unlock_irqrestore(&hba->hba_lock, flags); + + if (hifc_port_reset(hba) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]HIFC port(0x%x) can't reset HBA", + hba->port_cfg.port_id); + + return UNF_RETURN_ERROR; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_INFO, + "[info]HIFC port(0x%x) set loop role finished, configured loop role: 0x%x", + hba->port_cfg.port_id, loop_role); + + return RETURN_OK; +} + +unsigned int hifc_set_port_topo(void *v_hba, void *v_para_in) +{ + unsigned long flags = 0; + unsigned int top = 0; + struct hifc_hba_s *hba = v_hba; + + HIFC_CHECK(INVALID_VALUE32, NULL != hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, NULL != v_para_in, return UNF_RETURN_ERROR); + + top = *((unsigned int *)v_para_in); + if ((top == UNF_TOP_LOOP_MASK) && + (hba->port_speed_cfg >= HIFC_SPEED_16G)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Cannot set to loop mode at speed(0x%x), check it", + hba->port_speed_cfg); + + return UNF_RETURN_ERROR; + } + + spin_lock_irqsave(&hba->hba_lock, flags); + hba->port_topo_cfg = top; + spin_unlock_irqrestore(&hba->hba_lock, flags); + + if (hifc_port_reset(hba) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]HIFC port(0x%x) can't reset HBA", + hba->port_cfg.port_id); + + return UNF_RETURN_ERROR; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]HIFC port(0x%x) set port topology finished, configured topology: 0x%x", + hba->port_cfg.port_id, top); + + return RETURN_OK; +} + +unsigned int hifc_set_port_fcp_conf(void *v_hba, void *para_in) +{ + unsigned long flags = 0; + unsigned int fcp_conf = 0; + struct hifc_hba_s *hba = v_hba; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + 
HIFC_CHECK(INVALID_VALUE32, para_in, return UNF_RETURN_ERROR); + + fcp_conf = *((unsigned int *)para_in); + + spin_lock_irqsave(&hba->hba_lock, flags); + hba->fcp_conf_cfg = fcp_conf; + spin_unlock_irqrestore(&hba->hba_lock, flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]HIFC set port(0x%x) FCP confirm finished, configured value: 0x%x", + hba->port_cfg.port_id, fcp_conf); + + return RETURN_OK; +} + +unsigned int hifc_set_port_bbscn(void *v_hba, void *para_in) +{ + unsigned long flags = 0; + unsigned int bbscn = 0; + struct hifc_hba_s *hba = v_hba; + + HIFC_CHECK(INVALID_VALUE32, NULL != hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, NULL != para_in, return UNF_RETURN_ERROR); + + bbscn = *((unsigned int *)para_in); + + spin_lock_irqsave(&hba->hba_lock, flags); + hba->port_bbscn_cfg = bbscn; + spin_unlock_irqrestore(&hba->hba_lock, flags); + + if (hifc_port_reset(hba) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]HIFC port(0x%x) can't reset HBA", + hba->port_cfg.port_id); + + return UNF_RETURN_ERROR; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]HIFC set port(0x%x) BBSCN finished, configured value: 0x%x", + hba->port_cfg.port_id, bbscn); + + return RETURN_OK; +} + +unsigned int hifc_show_fc_port_detail(void *v_hba, void *v_para) +{ + struct hifc_fw_ver_detail_s version; + void *ver_buf = NULL; + struct unf_fw_version_s *fw_version = (struct unf_fw_version_s *)v_para; + + memset(&version, 0, sizeof(struct hifc_fw_ver_detail_s)); + ver_buf = (void *)(&version); + + /* Obtain UP, ucode and boot version */ + if (hifc_get_software_version(v_hba, ver_buf) != RETURN_OK) + return UNF_RETURN_ERROR; + + if (fw_version->message_type == UNF_DEBUG_TYPE_MESSAGE) + memcpy(fw_version->fw_version, version.up_ver, HIFC_VER_LEN); + + return RETURN_OK; +} + +unsigned int hifc_port_diagnose(void *v_hba, + enum unf_port_diag_op_e op_code, + void *v_para) +{ + unsigned int op_idx = 0; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]port diagnose succeed, opcode(0x%x), operation ID(0x%x)", + op_code, op_idx); + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_para, return UNF_RETURN_ERROR); + + for (op_idx = 0; op_idx < sizeof(hifc_diag_op) / + sizeof(struct hifc_port_diag_op_s); + op_idx++) { + if (op_code == hifc_diag_op[op_idx].op_code) { + if (!hifc_diag_op[op_idx].pfn_hifc_operation) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_REG_ATT, UNF_ERR, + "[err]Null operation for diagnose, opcode(0x%x), operation ID(0x%x)", + op_code, op_idx); + + return UNF_RETURN_ERROR; + } else { + return hifc_diag_op[op_idx].pfn_hifc_operation(v_hba, v_para); + } + } + } + + return RETURN_OK; +} + +int hifc_dfx_get_rxtx_state(void *v_hba, void *v_buff_out) +{ + int ret = RETURN_OK; + unsigned long long *counter_info = NULL; + unsigned int probe_index = 0; + unsigned int index = 0; + unsigned int total = 0; + struct hifc_adm_dfx_cmd_s *buff_out = NULL; + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba; + + buff_out = (struct hifc_adm_dfx_cmd_s *)v_buff_out; + + counter_info = + vmalloc(sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64); + if (!counter_info) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]malloc memory failed"); + + return UNF_RETURN_ERROR; + } + + memset(counter_info, 0, + sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64); + probe_index = hba->probe_index; + total = sizeof(wqe_type) / 
sizeof(char *); + + for (index = 0; index < total; index++) { + if (wqe_type[index]) + counter_info[index] = HIFC_IO_STAT_READ(probe_index, + index); + } + + memcpy(buff_out->unresult.result, counter_info, + sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64); + vfree(counter_info); + return ret; +} + +int hifc_dfx_get_rxtx_error_state(void *v_hba, void *v_buff_out) +{ + char *hba_err_type[HIFC_HBA_STAT_BUTT]; + int ret = RETURN_OK; + unsigned long long *counter_info = NULL; + unsigned int probe_index = 0; + unsigned int index = 0; + unsigned int counter = 0; + unsigned int total = 0; + struct hifc_adm_dfx_cmd_s *buff_out = NULL; + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba; + + buff_out = (struct hifc_adm_dfx_cmd_s *)v_buff_out; + counter_info = + vmalloc(sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64); + if (!counter_info) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]malloc memory failed"); + return UNF_RETURN_ERROR; + } + + memset(counter_info, 0, + sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64); + probe_index = hba->probe_index; + total = sizeof(wqe_type) / sizeof(char *); + + for (index = 0; index < total; index++) { + if (wqe_type[index]) { + counter_info[counter] = + HIFC_ERR_IO_STAT_READ(probe_index, index); + counter++; + } + } + + total = sizeof(hba_err_type) / sizeof(char *); + for (index = 0; index < total; index++) { + counter_info[counter] = hba_stat[probe_index][index]; + counter++; + } + + memcpy(buff_out->unresult.result, counter_info, + sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64); + vfree(counter_info); + return ret; +} + +int hifc_dfx_get_error_state(void *v_hba, void *v_buff_out) +{ + int ret = RETURN_OK; + unsigned long long *counter_info = NULL; + unsigned int probe_index = 0; + unsigned int index = 0; + unsigned int counter = 0; + unsigned int total = 0; + struct hifc_adm_dfx_cmd_s *buff_out = NULL; + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba; + + buff_out = (struct hifc_adm_dfx_cmd_s *)v_buff_out; + + counter_info = + vmalloc(sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64); + if (!counter_info) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]malloc memory failed"); + return UNF_RETURN_ERROR; + } + memset(counter_info, 0, + sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64); + probe_index = hba->probe_index; + + total = sizeof(scq_err_type) / sizeof(char *); + for (index = 1; index < total; index++) { + if (scq_err_type[index]) { + counter_info[counter] = + HIFC_SCQ_ERR_TYPE_STAT_READ(probe_index, index); + counter++; + } + } + + total = sizeof(aeq_err_type) / sizeof(char *); + for (index = 0; index < total; index++) { + if (aeq_err_type[index]) { + counter_info[counter] = + HIFC_AEQ_ERR_TYPE_STAT_READ(probe_index, index); + counter++; + } + } + + total = sizeof(err_event_type) / sizeof(char *); + for (index = 0; index < total; index++) { + if (err_event_type[index]) { + counter_info[counter] = + HIFC_UP_ERR_EVENT_STAT_READ(probe_index, index); + counter++; + } + } + + total = sizeof(com_up_err_event_type) / sizeof(char *); + for (index = 0; index < total; index++) { + if (com_up_err_event_type[index]) { + counter_info[counter] = + HIFC_COM_UP_ERR_EVENT_STAT_READ(probe_index, + index); + counter++; + } + } + + memcpy(buff_out->unresult.result, counter_info, + sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64); + vfree(counter_info); + return ret; +} + +int hifc_dfx_get_link_state(void *v_hba, void *v_buff_out) +{ +#define HIFC_LINK_UNKNOW 0 
+#define HIFC_LINK_UP 1 +#define HIFC_LINK_DOWN 2 +#define HIFC_FC_DELETE_CMND 3 +#define HIFC_LINK_DOWN_REASON 4 + + int ret = RETURN_OK; + unsigned int index; + unsigned int counter_index; + unsigned int *counter_info = NULL; + unsigned int probe_index = 0; + struct hifc_adm_dfx_cmd_s *buff_out = NULL; + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba; + + buff_out = (struct hifc_adm_dfx_cmd_s *)v_buff_out; + counter_info = vmalloc(sizeof(unsigned int) * HIFC_DFX_BACK_INFO_SIZE); + if (!counter_info) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]malloc memory failed"); + return UNF_RETURN_ERROR; + } + + memset(counter_info, 0, sizeof(unsigned int) * HIFC_DFX_BACK_INFO_SIZE); + probe_index = hba->probe_index; + + counter_info[HIFC_LINK_UP] = + (unsigned int)link_event_stat[probe_index][HIFC_LINK_UP]; + counter_info[HIFC_LINK_DOWN] = + (unsigned int)link_event_stat[probe_index][HIFC_LINK_DOWN]; + counter_info[HIFC_FC_DELETE_CMND] = + (unsigned int)link_event_stat[probe_index][HIFC_FC_DELETE_CMND]; + counter_info[HIFC_LINK_UNKNOW] = + (unsigned int)link_event_stat[probe_index][HIFC_LINK_UNKNOW]; + + for (index = 0; index < HIFC_MAX_LINK_REASON_CNT; index++) { + if (link_reason_stat[probe_index][index]) { + counter_index = HIFC_LINK_DOWN_REASON + index; + counter_info[counter_index] = + (unsigned int) + link_reason_stat[probe_index][index]; + } + } + + memcpy(buff_out->unresult.result, counter_info, + sizeof(unsigned int) * HIFC_DFX_BACK_INFO_SIZE); + vfree(counter_info); + return ret; +} + +int hifc_dfx_dif_error(void *v_hba, void *v_buff_out, unsigned int v_clear) +{ +#define HIFC_MAX_DIF_ERROR_COUNTER 8 + + int ret = RETURN_OK; + unsigned int index = 0; + unsigned int total = 0; + unsigned long long *counter_info = NULL; + unsigned int probe_index = 0; + struct hifc_adm_dfx_cmd_s *buff_out = NULL; + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba; + + buff_out = (struct hifc_adm_dfx_cmd_s *)v_buff_out; + + counter_info = + vmalloc(sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64); + if (!counter_info) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]malloc memory failed"); + return UNF_RETURN_ERROR; + } + memset(counter_info, 0, + sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64); + + probe_index = hba->probe_index; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR, + "[info]The clear flag of DIF error counter is %u", v_clear); + + if (!v_clear) { + total = HIFC_MAX_DIF_ERROR_COUNTER; + + for (index = 1; index < total; index++) + counter_info[index - 1] = + HIFC_DIF_ERR_STAT_READ(probe_index, index); + + memcpy(buff_out->unresult.result, counter_info, + sizeof(unsigned long long) * HIFC_DFX_BACK_INFO_SIZE64); + } else { + memset(dif_err_stat[probe_index], 0, sizeof(dif_err_stat[0])); + } + vfree(counter_info); + return ret; +} + +int hifc_set_dfx_mode(void *v_hba, struct unf_hinicam_pkg *v_input) +{ + int ret = UNF_RETURN_ERROR; + unsigned int mode; + struct hifc_adm_dfx_cmd_s *buff_in = NULL; + struct hifc_adm_dfx_cmd_s *buff_out = NULL; + + HIFC_CHECK(INVALID_VALUE32, v_input, return UNF_RETURN_ERROR); + + buff_in = v_input->buff_in; + buff_out = (struct hifc_adm_dfx_cmd_s *)v_input->buff_out; + + HIFC_CHECK(INVALID_VALUE32, buff_in, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, buff_out, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, + v_input->in_size >= sizeof(struct hifc_adm_dfx_cmd_s), + return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, + *v_input->out_size >= sizeof(struct 
hifc_adm_dfx_cmd_s), + return UNF_RETURN_ERROR); + + buff_out->msg_head.status = HIFC_ADM_MSG_DONE; + mode = buff_in->cmd[0]; + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Enter DFX mode(%u)", mode); + + switch (mode) { + /* HBA WQE and SCQE statistic */ + case HIFC_TX_RX_STATE_COUNTER: + ret = hifc_dfx_get_rxtx_state(v_hba, (void *)buff_out); + break; + + /* TX and RX error counter, HBA counter */ + case HIFC_TX_RX_ERROR_STATE_COUNTER: + ret = hifc_dfx_get_rxtx_error_state(v_hba, (void *)buff_out); + break; + + /* SCQ, AEQ, uP, common uP error counter */ + case HIFC_ERROR_STATE_COUNTER: + ret = hifc_dfx_get_error_state(v_hba, (void *)buff_out); + break; + + case HIFC_LINK_STATE_COUNTER: + ret = hifc_dfx_get_link_state(v_hba, (void *)buff_out); + break; + + case HIFC_HOST_COUNTER: + case HIFC_SESSION_COUNTER: + UNF_LOWLEVEL_TO_CM_HINICADM(ret, + ((struct hifc_hba_s *)v_hba)->lport, + v_input); + break; + + case HIFC_DIF_ERROR_COUNTER: + ret = hifc_dfx_dif_error(v_hba, (void *)buff_out, + buff_in->cmd[1]); + break; + + default: + break; + } + + if (ret != RETURN_OK) { + buff_out->msg_head.status = HIFC_ADM_MSG_FAILED; + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[warn]Get DFX information failed, mode:0x%0x", mode); + } + + buff_out->msg_head.size = sizeof(struct hifc_adm_dfx_cmd_s); + *v_input->out_size = sizeof(struct hifc_adm_dfx_cmd_s); + + return ret; +} + +unsigned int hifc_fec_mode(void *v_hba, struct unf_hinicam_pkg *v_input) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int fec_mode = 0; + struct hifc_adm_cmd_s *buff_in = NULL; + struct hifc_adm_cmd_s *buff_out = NULL; + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba; + + HIFC_CHECK(INVALID_VALUE32, NULL != v_input, return UNF_RETURN_ERROR); + + buff_in = v_input->buff_in; + buff_out = (struct hifc_adm_cmd_s *)v_input->buff_out; + + HIFC_CHECK(INVALID_VALUE32, NULL != buff_in, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, NULL != buff_out, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, + v_input->in_size >= sizeof(struct hifc_adm_cmd_s), + return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, + *v_input->out_size >= sizeof(struct hifc_adm_cmd_s), + return UNF_RETURN_ERROR); + + buff_out->msg_head.status = HIFC_ADM_MSG_DONE; + fec_mode = buff_in->cmd[0]; + + if (fec_mode < HIFC_QUERY_FEC_MODE) { + ret = hifc_mbx_set_fec((struct hifc_hba_s *)v_hba, fec_mode); + hba->fec_status = fec_mode; + if (ret != RETURN_OK) { + buff_out->msg_head.status = HIFC_ADM_MSG_FAILED; + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[err]Set fec mode(0x%x) failed", fec_mode); + + return ret; + } + } else if (fec_mode == HIFC_QUERY_FEC_MODE) { + buff_out->cmd[0] = hba->fec_status; + ret = RETURN_OK; + } + + buff_out->msg_head.size = sizeof(struct hifc_adm_msg_head_s); + *v_input->out_size = sizeof(struct hifc_adm_cmd_s); + + return ret; +} + +unsigned int hifc_set_hba_base_info(void *v_hba, void *v_para_in) +{ +#define HIFC_MML_CLOSE_FEC 0 +#define HIFC_MML_OPEN_FEC_VIA_TTS 1 +#define HIFC_MML_OPEN_FEC_ONLY 2 + + struct unf_port_info_entry_s *port_info = 0; + struct hifc_hba_s *hba = v_hba; + unsigned long flags = 0; + + HIFC_CHECK(INVALID_VALUE32, NULL != hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, NULL != v_para_in, return UNF_RETURN_ERROR); + port_info = (struct unf_port_info_entry_s *)v_para_in; + + if (port_info->speed > hba->max_support_speed) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Port(0x%x) Speed 
set(0x%x) exceed max speed(0x%x)", + hba->port_cfg.port_id, port_info->speed, + hba->max_support_speed); + + return UNF_RETURN_ERROR; + } + + if ((port_info->speed >= HIFC_SPEED_16G) && + (port_info->topo == UNF_TOP_LOOP_MASK)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Port(0x%x) Cannot set speed(0x%x) in LOOP mode, check it", + hba->port_cfg.port_id, port_info->speed); + + return UNF_RETURN_ERROR; + } + + if ((port_info->fec != HIFC_MML_CLOSE_FEC) && + (port_info->fec != HIFC_MML_OPEN_FEC_VIA_TTS) && + (port_info->fec != HIFC_MML_OPEN_FEC_ONLY)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Port(0x%x) parameter error! please input 0,1 or 2!", + hba->port_cfg.port_id); + + return UNF_RETURN_ERROR; + } + + if (hifc_mbx_set_fec(hba, port_info->fec) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Port(0x%x) set FEC %u failed.\n", + hba->port_cfg.port_id, + port_info->fec); + + return UNF_RETURN_ERROR; + } + + spin_lock_irqsave(&hba->hba_lock, flags); + hba->port_speed_cfg = port_info->speed; + hba->port_topo_cfg = port_info->topo; + hba->port_bbscn_cfg = port_info->bb_scn; + spin_unlock_irqrestore(&hba->hba_lock, flags); + + return RETURN_OK; +} + +unsigned int hifc_bbscn_mode(void *v_hba, struct unf_hinicam_pkg *v_input) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int bbscn_mode; + struct hifc_adm_cmd_s *buff_in = NULL; + struct hifc_adm_cmd_s *buff_out = NULL; + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba; + + HIFC_CHECK(INVALID_VALUE32, NULL != v_input, return UNF_RETURN_ERROR); + + buff_in = v_input->buff_in; + buff_out = (struct hifc_adm_cmd_s *)v_input->buff_out; + + HIFC_CHECK(INVALID_VALUE32, buff_in, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, buff_out, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, + v_input->in_size >= sizeof(struct hifc_adm_cmd_s), + return UNF_RETURN_ERROR); + + HIFC_CHECK(INVALID_VALUE32, + *v_input->out_size >= sizeof(struct hifc_adm_cmd_s), + return UNF_RETURN_ERROR); + + buff_out->msg_head.status = HIFC_ADM_MSG_DONE; + bbscn_mode = buff_in->cmd[0]; + + if (bbscn_mode == HIFC_SET_BBSCN_VALUE) { + UNF_LOWLEVEL_TO_CM_HINICADM(ret, hba->lport, v_input); + } else if (bbscn_mode == HIFC_QUERY_BBSCN_VALUE) { + ret = hifc_get_port_info((void *)hba); + if (hba->phy_link == UNF_PORT_LINK_UP) { + buff_out->cmd[0] = hba->active_bb_scn; + buff_out->cmd[1] = hba->port_bbscn_cfg; + } else { + buff_out->cmd[0] = UNF_FALSE; + buff_out->cmd[1] = hba->port_bbscn_cfg; + } + + buff_out->msg_head.size = sizeof(struct hifc_adm_msg_head_s) + + sizeof(unsigned int); + } + + if (ret != RETURN_OK) { + buff_out->msg_head.status = HIFC_ADM_MSG_FAILED; + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Execute BBSCN mode(0x%x) failed", bbscn_mode); + + return ret; + } + + *v_input->out_size = sizeof(struct hifc_adm_cmd_s); + + return ret; +} + +unsigned int hifc_port_stat(void *v_hba, struct unf_hinicam_pkg *v_input) +{ + struct hifc_adm_lsq_info_s *buff_in = NULL; + struct hifc_adm_lsq_info_s *buff_out = NULL; + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba; + unsigned int rport_start = 0; + struct hifc_parent_queue_mgr_s *parent_queue_mgr = NULL; + unsigned int index = 0; + unsigned int queue_state[HIFC_QUEUE_STATE_BUTT] = { 0 }; + struct hifc_parent_sq_info_s *sq = NULL; + int out_standing_cnt = 0; + unsigned int in_sq_cnt = 0; + + HIFC_CHECK(INVALID_VALUE32, NULL != v_input, return UNF_RETURN_ERROR); + 
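+	/* buff_in->cmd[0] selects the rport index whose SQ detail is reported along with the per-state parent queue summary. */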
HIFC_CHECK(INVALID_VALUE32, NULL != v_hba, return UNF_RETURN_ERROR); + + buff_in = v_input->buff_in; + buff_out = (struct hifc_adm_lsq_info_s *)v_input->buff_out; + + HIFC_CHECK(INVALID_VALUE32, buff_in, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, buff_out, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, + v_input->in_size >= sizeof(struct hifc_adm_lsq_info_s), + return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, + *v_input->out_size >= sizeof(struct hifc_adm_lsq_info_s), + return UNF_RETURN_ERROR); + + rport_start = buff_in->cmd[0]; + + parent_queue_mgr = hba->parent_queue_mgr; + if (!parent_queue_mgr) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port 0x%x Parent Queue Manager is Empty", + hba->port_cfg.port_id); + return UNF_RETURN_ERROR; + } + + for (index = 0; index < UNF_HIFC_MAXRPORT_NUM; index++) { + if (parent_queue_mgr->parent_queues[index].offload_state < + HIFC_QUEUE_STATE_BUTT) + queue_state[parent_queue_mgr->parent_queues[index].offload_state]++; + } + + buff_out->port_state.port_id = hba->port_cfg.port_id; + buff_out->port_state.rport_num = + (UNF_HIFC_MAXRPORT_NUM - queue_state[HIFC_QUEUE_STATE_FREE]); + buff_out->port_state.init = queue_state[HIFC_QUEUE_STATE_INITIALIZED]; + buff_out->port_state.offloading = + queue_state[HIFC_QUEUE_STATE_OFFLOADING]; + buff_out->port_state.offloaded = + queue_state[HIFC_QUEUE_STATE_OFFLOADED]; + buff_out->port_state.destroying = + queue_state[HIFC_QUEUE_STATE_DESTROYING]; + + index = rport_start; + + if ((index < UNF_HIFC_MAXRPORT_NUM) && + (parent_queue_mgr->parent_queues[index].offload_state != + HIFC_QUEUE_STATE_FREE)) { + sq = &parent_queue_mgr->parent_queues[index].parent_sq_info; + + buff_out->sq.sq_id = index; + buff_out->sq.rport_index = sq->rport_index; + buff_out->sq.xid = sq->context_id; + buff_out->sq.cid = sq->cache_id; + buff_out->sq.sid = sq->local_port_id; + buff_out->sq.did = sq->remote_port_id; + buff_out->sq.vpid = parent_queue_mgr->parent_queues[index].parent_sq_info.vport_id; + buff_out->sq.cmd_local_queue_id = parent_queue_mgr->parent_queues[index].parent_cmd_scq_info.local_queue_id; + buff_out->sq.cmd_cqm_queue_id = parent_queue_mgr->parent_queues[index].parent_cmd_scq_info.cqm_queue_id; + buff_out->sq.sts_local_queue_id = parent_queue_mgr->parent_queues[index].parent_sts_scq_info.local_queue_id; + buff_out->sq.sts_cqm_queue_id = parent_queue_mgr->parent_queues[index].parent_sts_scq_info.cqm_queue_id; + buff_out->sq.cos = + parent_queue_mgr->parent_queues[index].queue_data_cos; + buff_out->sq.off_load = + parent_queue_mgr->parent_queues[index].offload_state; + + out_standing_cnt = atomic_read(&sq->sqe_minus_cqe_cnt); + /* read memory barrier */ + rmb(); + in_sq_cnt = HIFC_QUEUE_MSN_OFFSET(HIFC_GET_QUEUE_CMSN(sq), + sq->last_pmsn); + /* read memory barrier */ + rmb(); + + buff_out->sq.cmsn = HIFC_GET_QUEUE_CMSN(sq); + buff_out->sq.pmsn = sq->last_pmsn; + buff_out->sq.db_cnt = atomic_read(&sq->sq_dbl_cnt); + buff_out->sq.sqe_cnt = atomic_read(&sq->sq_wqe_cnt); + buff_out->sq.cqe_cnt = atomic_read(&sq->sq_cqe_cnt); + buff_out->sq.in_sq_cnt = in_sq_cnt; + buff_out->sq.in_chip_cnt = out_standing_cnt - (int)in_sq_cnt; + + buff_out->mark = UNF_TRUE; + + } else { + buff_out->mark = UNF_FALSE; + } + + return RETURN_OK; +} + +unsigned int hifc_port_info(struct unf_hinicam_pkg *v_input) +{ +#define HIFC_INQUIRE_PORT_NUM_MODE 1 + + unsigned int ret = UNF_RETURN_ERROR; + unsigned int inquire_type; + unsigned int probe_total_num = 0; + unsigned int probe_index = 0; + unsigned int 
count = 0; + struct hifc_adm_cmd_s *buff_in = NULL; + struct hifc_adm_cmd_s *buff_out = NULL; + struct hifc_hba_s *hba = NULL; + + HIFC_CHECK(INVALID_VALUE32, v_input, return UNF_RETURN_ERROR); + + buff_in = v_input->buff_in; + buff_out = (struct hifc_adm_cmd_s *)v_input->buff_out; + + HIFC_CHECK(INVALID_VALUE32, buff_in, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, buff_out, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, + v_input->in_size >= sizeof(struct hifc_adm_cmd_s), + return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, + *v_input->out_size >= sizeof(struct hifc_adm_cmd_s), + return UNF_RETURN_ERROR); + + hifc_get_total_probed_num(&probe_total_num); + + /* First bit is used to obtain total probe number */ + inquire_type = buff_in->cmd[0]; + if (inquire_type == HIFC_INQUIRE_PORT_NUM_MODE) { + buff_out->cmd[0] = probe_total_num; + buff_out->msg_head.status = HIFC_ADM_MSG_DONE; + *v_input->out_size = sizeof(struct hifc_adm_cmd_s); + + return RETURN_OK; + } + + spin_lock(&probe_spin_lock); + for (probe_index = 0; probe_index < HIFC_MAX_PROBE_PORT_NUM; + probe_index++) { + /* Second bit is used to determine to obtain which port */ + if (buff_in->cmd[1] == count) + break; + + if (test_bit((int)probe_index, + (const unsigned long *)probe_bit_map)) + count++; + } + spin_unlock(&probe_spin_lock); + + if (probe_index == HIFC_MAX_PROBE_PORT_NUM) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Can't find port(0x%x) total port(0x%x)", + buff_in->cmd[1], probe_total_num); + + buff_out->msg_head.status = HIFC_ADM_MSG_FAILED; + + return ret; + } + + hba = hifc_hba[probe_index]; + /* Obtain buffer length applied from user */ + v_input->in_size = buff_in->cmd[2]; + if (!hba) + return UNF_RETURN_ERROR; + + UNF_LOWLEVEL_TO_CM_HINICADM(ret, hba->lport, v_input); + + return ret; +} + +int hifc_adm(void *uld_dev, unsigned int msg_formate, void *buffin, + unsigned int in_size, void *buff_out, unsigned int *out_size) +{ + int ret = UNF_RETURN_ERROR; + struct hifc_hba_s *hba = NULL; + struct unf_hinicam_pkg adm_pkg = { 0 }; + struct hifc_drv_version_s *ver_info; + char ver_str[HIFC_VER_INFO_SIZE] = { 0 }; + + HIFC_CHECK(INVALID_VALUE32, NULL != buff_out, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, NULL != buffin, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, NULL != out_size, return UNF_RETURN_ERROR); + + adm_pkg.msg_format = msg_formate; + adm_pkg.buff_in = buffin; + adm_pkg.buff_out = buff_out; + adm_pkg.in_size = in_size; + adm_pkg.out_size = out_size; + + if (msg_formate == HIFC_GET_DRIVER_VERSION) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Enter HIFC_GET_DRIVER_VERSION"); + + snprintf(ver_str, sizeof(ver_str), "%s %s", UNF_FC_VERSION, + __TIME_STR__); + + ver_info = (struct hifc_drv_version_s *)buff_out; + HIFC_CHECK(INVALID_VALUE32, + *out_size >= sizeof(struct hifc_drv_version_s), + return UNF_RETURN_ERROR); + memcpy(ver_info->ver, ver_str, sizeof(ver_str)); + + *(unsigned int *)out_size = sizeof(struct hifc_drv_version_s); + + return RETURN_OK; + } + + if (msg_formate == HIFC_COMPAT_TEST) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Enter driver compatibility test"); + /* UNF_TRUE: driver is compatible with hifcadm */ + *(unsigned char *)buff_out = UNF_TRUE; + *(unsigned int *)out_size = sizeof(unsigned char); + + return RETURN_OK; + } + + HIFC_CHECK(INVALID_VALUE32, NULL != uld_dev, return UNF_RETURN_ERROR); + hba = (struct hifc_hba_s *)uld_dev; + + 
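+	/* Dispatch the hifcadm sub-command: DFX, FEC, BBSCN, port statistics and port info are handled locally below; any other message format is forwarded to the common module via UNF_LOWLEVEL_TO_CM_HINICADM. */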
UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Enter hifc_adm, msg_formate(0x%x)", msg_formate); + + switch (msg_formate) { + case HIFC_DFX: + ret = hifc_set_dfx_mode((void *)hba, &adm_pkg); + break; + case HIFC_FEC_SET: + ret = (int)hifc_fec_mode((void *)hba, &adm_pkg); + break; + case HIFC_BBSCN: + ret = (int)hifc_bbscn_mode((void *)hba, &adm_pkg); + break; + + case HIFC_PORTSTAT: + ret = (int)hifc_port_stat((void *)hba, &adm_pkg); + break; + + case HIFC_ALL_INFO_OP: + ret = (int)hifc_port_info(&adm_pkg); + break; + + default: + UNF_LOWLEVEL_TO_CM_HINICADM(ret, hba->lport, &adm_pkg); + break; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Enter hifc_adm 0x%x", *adm_pkg.out_size); + + return ret; +} + diff --git a/drivers/scsi/huawei/hifc/hifc_portmng.h b/drivers/scsi/huawei/hifc/hifc_portmng.h new file mode 100644 index 000000000000..76e0884125dc --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_portmng.h @@ -0,0 +1,223 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef __HIFC_PORTMNG_H__ +#define __HIFC_PORTMNG_H__ + +#include "unf_common.h" +#include "hifc_module.h" +#include "hifc_hba.h" + +#define HIFC_PORT_INFO_SIZE 10 +#define HIFC_DFX_BACK_INFO_SIZE 406 +#define HIFC_DFX_BACK_INFO_SIZE64 203 +#define HIFC_GET_DRIVER_VERSION 16 +#define HIFC_SET_BBSCN_VALUE 0 +#define HIFC_QUERY_BBSCN_VALUE 1 +#define HIFC_QUERY_FEC_MODE 2 + +#define FC_DFX_SEND_INFO_SIZE 5 +#define FC_DFX_BACK_INFO_64 203 +#define FC_DFX_BACK_INFO_32 406 +#define FC_DFX_MAX_IO_RETURN_VALUE 0x12 +#define FC_DFX_MAX_SCSI_CMD 0xFF +#define FC_DFX_SCSI_CMD_FIRST_GET 100 + +struct unf_adm_dfx_session_state { + unsigned char session1 : 4; + unsigned char session2 : 4; +}; + +struct session_counter_s { + u64 target_busy; + u64 host_busy; + u64 remote_port_wwpn; + u64 local_port_wwpn; + u32 device_alloc; + u32 device_destroy; + u32 scsi_state; + u32 remote_port_nportid; + u32 remote_port_state; + u32 remote_port_scsiid; + u32 remote_port_index; + u32 local_port_nportid; + u32 local_port_ini_state; + u32 local_port_state; + u32 port_id; + u32 host_id; + u32 target_id; + u32 abort_io; + u32 device_reset; + u32 target_reset; + u32 bus_reset; + u32 virtual_reset; + u32 abort_io_result; + u32 device_reset_result; + u32 target_reset_result; + u32 bus_reset_result; + u32 virtual_reset_result; +}; + +enum hifc_adm_msg_status_e { + HIFC_ADM_MSG_DONE = 0, + HIFC_ADM_MSG_INCOMPLETE, + HIFC_ADM_MSG_FAILED, + HIFC_ADM_MSG_BUTT +}; + +struct hifc_port_diag_op_s { + enum unf_port_diag_op_e op_code; + unsigned int (*pfn_hifc_operation)(void *v_hba, void *v_para); +}; + +enum hifc_adm_dfx_mod_e { + /* HBA WQE and SCQE statistic */ + HIFC_TX_RX_STATE_COUNTER = 0, + /* TX and RX error counter, HBA counter */ + HIFC_TX_RX_ERROR_STATE_COUNTER, + /* SCQ, AEQ, uP, common uP error counter */ + HIFC_ERROR_STATE_COUNTER, + /* Link state counter */ + HIFC_LINK_STATE_COUNTER, + /* Host counter */ + HIFC_HOST_COUNTER, + /* session counter */ + HIFC_SESSION_COUNTER, + /* DIF error counter */ + HIFC_DIF_ERROR_COUNTER, + HIFC_ALL_DFX_TYPE = 50, +}; + +enum hifc_msg_format_e { + HIFC_DFX = 7, + HIFC_FEC_SET, + HIFC_BBSCN, + HIFC_PORTSTAT = 24, + HIFC_ALL_INFO_OP = 25, + HIFC_COMPAT_TEST = 0xFF +}; + +struct hifc_adm_msg_head_s { + unsigned int size; + unsigned short status; + unsigned short rsvd; +}; + +/* port state for fc_portstat */ +struct hifc_adm_port_state { + unsigned int port_id; + unsigned 
int rport_num; + unsigned int init; + unsigned int offloading; + unsigned int offloaded; + unsigned int destroying; +}; + +/* SQ & IoStat for fc_portstat */ +struct hifc_adm_sq { + unsigned int sq_id; + unsigned int rport_index; + unsigned int xid; + unsigned int cid; + unsigned int sid; + unsigned int did; + unsigned int vpid; + unsigned int cmd_local_queue_id; + unsigned int cmd_cqm_queue_id; + unsigned int sts_local_queue_id; + unsigned int sts_cqm_queue_id; + unsigned int cos; + unsigned int off_load; + unsigned int cmsn; + unsigned int pmsn; + unsigned int db_cnt; + unsigned int sqe_cnt; + unsigned int cqe_cnt; + unsigned int in_sq_cnt; + unsigned int in_chip_cnt; +}; + +/* hifcadm fc_portstat struct,that is used to show ListSqinfo from mml */ +struct hifc_adm_lsq_info_s { + struct hifc_adm_msg_head_s msg_head; + unsigned int cmd[HIFC_PORT_INFO_SIZE]; + struct hifc_adm_port_state port_state; + struct hifc_adm_sq sq; + unsigned int mark; +}; + +struct unf_adm_dfx_host_counter_s { + unsigned int host_num; + unsigned int port_id; + unsigned int scsi_session_add_success; + unsigned int scsi_session_add_failed; + unsigned int scsi_session_del_success; + unsigned int scsi_session_del_failed; + unsigned int device_alloc; + unsigned int device_destroy; + unsigned int session_loss_tmo; + unsigned int alloc_scsi_id; + unsigned int reuse_scsi_id; + unsigned int resume_scsi_id; + unsigned int add_start_work_failed; + unsigned int add_closing_work_failed; + unsigned int abort_io; + unsigned int device_reset; + unsigned int target_reset; + unsigned int bus_reset; + unsigned int virtual_reset; + unsigned int abort_io_result; + unsigned int device_reset_result; + unsigned int target_reset_result; + unsigned int bus_reset_result; + unsigned int virtual_reset_result; + struct unf_adm_dfx_session_state session_state[1024]; +}; + +/* hifcadm fc_port struct */ +struct hifc_adm_cmd_s { + struct hifc_adm_msg_head_s msg_head; + unsigned int cmd[HIFC_PORT_INFO_SIZE]; +}; + +/* hifcadm fc_dfx struct */ +struct hifc_adm_dfx_cmd_s { + struct hifc_adm_msg_head_s msg_head; + unsigned int cmd[HIFC_PORT_INFO_SIZE]; + union { + unsigned long long result[HIFC_DFX_BACK_INFO_SIZE64]; + struct unf_adm_dfx_host_counter_s host_cnt; + struct session_counter_s session_cnt; + unsigned long long scsi_cmd_in; + unsigned long long scsi_cmd_done; + unsigned long long target_busy; + unsigned long long host_busy; + } unresult; +}; + +unsigned int hifc_port_diagnose(void *v_hba, enum unf_port_diag_op_e op_code, + void *v_para); +unsigned int hifc_set_port_speed(void *v_hba, void *v_para_in); +unsigned int hifc_set_port_bbscn(void *v_hba, void *v_para_in); +unsigned int hifc_set_port_state(void *v_hba, void *v_para_in); +unsigned int hifc_set_port_topo(void *v_hba, void *v_para_in); +unsigned int hifc_set_port_fcp_conf(void *v_hba, void *v_para_in); +unsigned int hifc_set_loop_role(void *v_hba, void *v_para_in); +unsigned int hifc_set_max_support_speed(void *v_hba, void *v_para_in); +unsigned int hifc_show_fc_port_detail(void *v_hba, void *v_para); +int hifc_adm(void *uld_dev, unsigned int msg_formate, void *buffin, + unsigned int in_size, void *buff_out, unsigned int *out_size); +unsigned int hifc_fec_mode(void *v_hba, struct unf_hinicam_pkg *v_input); +int hifc_set_dfx_mode(void *v_hba, struct unf_hinicam_pkg *v_input); +int hifc_dfx_get_link_state(void *v_hba, void *v_buff_out); +int hifc_dfx_get_error_state(void *v_hba, void *v_buff_out); +int hifc_dfx_get_rxtx_state(void *v_hba, void *v_buff_out); +unsigned int 
hifc_bbscn_mode(void *v_hba, struct unf_hinicam_pkg *v_input); +unsigned int hifc_port_stat(void *v_hba, struct unf_hinicam_pkg *v_input); +int hifc_dfx_dif_error(void *v_hba, void *v_buff_out, unsigned int v_clear); +unsigned int hifc_set_hba_base_info(void *v_hba, void *v_para_in); + +#endif /* __HIFC_PORTMNG_H__ */ diff --git a/drivers/scsi/huawei/hifc/hifc_tool.c b/drivers/scsi/huawei/hifc/hifc_tool.c new file mode 100644 index 000000000000..c8455e7609d3 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_tool.c @@ -0,0 +1,785 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/cdev.h> +#include <linux/device.h> +#include <linux/netdevice.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <net/sock.h> + +#include "hifc_knl_adp.h" +#include "hifc_hw.h" +#include "hifc_hwif.h" +#include "hifc_api_cmd.h" +#include "hifc_mgmt.h" +#include "hifc_lld.h" +#include "hifc_dbgtool_knl.h" +#include "hifc_tool.h" +#include "hifc_portmng.h" + +#define HIADM_DEV_PATH "/dev/hifc_dev" +#define HIADM_DEV_CLASS "hifc_class" +#define HIADM_DEV_NAME "hifc_dev" + +#define MAJOR_DEV_NUM 921 +#define HIFC_CMDQ_BUF_MAX_SIZE 2048U +#define MSG_MAX_IN_SIZE (2048 * 1024) +#define MSG_MAX_OUT_SIZE (2048 * 1024) + +static dev_t g_dev_id = {0}; +static struct class *g_nictool_class; +static struct cdev g_nictool_cdev; + +static int g_nictool_init_flag; +static int g_nictool_ref_cnt; + +static void free_buff_in(void *hwdev, struct msg_module *nt_msg, void *buf_in) +{ + if (!buf_in) + return; + + if (nt_msg->module == SEND_TO_UCODE) + hifc_free_cmd_buf(hwdev, buf_in); + else + kfree(buf_in); +} + +static int alloc_buff_in(void *hwdev, struct msg_module *nt_msg, + u32 in_size, void **buf_in) +{ + void *msg_buf; + + if (!in_size) + return 0; + + if (nt_msg->module == SEND_TO_UCODE) { + struct hifc_cmd_buf *cmd_buf; + + if (in_size > HIFC_CMDQ_BUF_MAX_SIZE) { + pr_err("Cmdq in size(%u) more than 2KB\n", in_size); + return -ENOMEM; + } + + cmd_buf = hifc_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + pr_err("Alloc cmdq cmd buffer failed in %s\n", + __func__); + return -ENOMEM; + } + msg_buf = cmd_buf->buf; + *buf_in = (void *)cmd_buf; + cmd_buf->size = (u16)in_size; + } else { + if (in_size > MSG_MAX_IN_SIZE) { + pr_err("In size(%u) more than 2M\n", in_size); + return -ENOMEM; + } + msg_buf = kzalloc(in_size, GFP_KERNEL); + *buf_in = msg_buf; + } + if (!(*buf_in)) { + pr_err("Alloc buffer in failed\n"); + return -ENOMEM; + } + + if (copy_from_user(msg_buf, nt_msg->in_buff, in_size)) { + pr_err("%s:%d: Copy from user failed\n", + __func__, __LINE__); + free_buff_in(hwdev, nt_msg, *buf_in); + return -EFAULT; + } + + return 0; +} + +static void free_buff_out(void *hwdev, struct msg_module *nt_msg, + void *buf_out) +{ + if (!buf_out) + return; + + if (nt_msg->module == SEND_TO_UCODE && + !nt_msg->ucode_cmd.ucode_db.ucode_imm) + hifc_free_cmd_buf(hwdev, buf_out); + else + kfree(buf_out); +} + +static int alloc_buff_out(void *hwdev, struct msg_module *nt_msg, + u32 out_size, void **buf_out) +{ + if (!out_size) + return 0; + + if (nt_msg->module == SEND_TO_UCODE && + !nt_msg->ucode_cmd.ucode_db.ucode_imm) { + struct hifc_cmd_buf *cmd_buf; + + if (out_size > HIFC_CMDQ_BUF_MAX_SIZE) { + pr_err("Cmdq out size(%u) more than 2KB\n", out_size); + return -ENOMEM; + } + + cmd_buf = hifc_alloc_cmd_buf(hwdev); + *buf_out = (void *)cmd_buf; + } else { + if (out_size > 
MSG_MAX_OUT_SIZE) { + pr_err("out size(%u) more than 2M\n", out_size); + return -ENOMEM; + } + *buf_out = kzalloc(out_size, GFP_KERNEL); + } + if (!(*buf_out)) { + pr_err("Alloc buffer out failed\n"); + return -ENOMEM; + } + + return 0; +} + +static int copy_buf_out_to_user(struct msg_module *nt_msg, + u32 out_size, void *buf_out) +{ + int ret = 0; + void *msg_out; + + if (nt_msg->module == SEND_TO_UCODE && + !nt_msg->ucode_cmd.ucode_db.ucode_imm) + msg_out = ((struct hifc_cmd_buf *)buf_out)->buf; + else + msg_out = buf_out; + + if (copy_to_user(nt_msg->out_buf, msg_out, out_size)) + ret = -EFAULT; + + return ret; +} + +static int __get_card_usr_api_chain_mem(int card_idx) +{ +#define DBGTOOL_PAGE_ORDER 10 + + unsigned char *tmp; + int i; + + mutex_lock(&g_addr_lock); + card_id = card_idx; + if (!g_card_vir_addr[card_idx]) { + g_card_vir_addr[card_idx] = + (void *)__get_free_pages(GFP_KERNEL, + DBGTOOL_PAGE_ORDER); + if (!g_card_vir_addr[card_idx]) { + pr_err("Alloc api chain memory failed for card %d.\n", + card_idx); + mutex_unlock(&g_addr_lock); + return -EFAULT; + } + + memset(g_card_vir_addr[card_idx], 0, + PAGE_SIZE * (1 << DBGTOOL_PAGE_ORDER)); + + g_card_phy_addr[card_idx] = + virt_to_phys(g_card_vir_addr[card_idx]); + if (!g_card_phy_addr[card_idx]) { + pr_err("phy addr for card %d is 0.\n", card_idx); + free_pages((unsigned long)g_card_vir_addr[card_idx], + DBGTOOL_PAGE_ORDER); + g_card_vir_addr[card_idx] = NULL; + mutex_unlock(&g_addr_lock); + return -EFAULT; + } + + tmp = g_card_vir_addr[card_idx]; + for (i = 0; i < (1 << DBGTOOL_PAGE_ORDER); i++) { + SetPageReserved(virt_to_page(tmp)); + tmp += PAGE_SIZE; + } + } + mutex_unlock(&g_addr_lock); + + return 0; +} + +static int get_card_func_info(char *dev_name, struct msg_module *nt_msg) +{ + struct hifc_card_func_info card_func_info = {0}; + int id, err; + + if (nt_msg->len_info.out_buff_len != sizeof(card_func_info) || + nt_msg->len_info.in_buff_len != sizeof(card_func_info)) { + pr_err("Invalid out_buf_size %d or Invalid in_buf_size %d, expect %lu\n", + nt_msg->len_info.out_buff_len, + nt_msg->len_info.in_buff_len, + sizeof(card_func_info)); + return -EINVAL; + } + + err = memcmp(dev_name, HIFC_CHIP_NAME, strlen(HIFC_CHIP_NAME)); + if (err) { + pr_err("Invalid chip name %s\n", dev_name); + return err; + } + + err = sscanf(dev_name, HIFC_CHIP_NAME "%d", &id); + if (err < 0) { + pr_err("Failed to get hifc id\n"); + return err; + } + + if (id >= MAX_CARD_NUM) { + pr_err("chip id %d exceeds limit [0-%d]\n", id, MAX_CARD_NUM - 1); + return -EINVAL; + } + + hifc_get_card_func_info_by_card_name(dev_name, &card_func_info); + + if (!card_func_info.num_pf) { + pr_err("No function found for %s\n", dev_name); + return -EFAULT; + } + + err = __get_card_usr_api_chain_mem(id); + if (err) { + pr_err("Failed to get api chain memory for userspace %s\n", + dev_name); + return -EFAULT; + } + + card_func_info.usr_api_phy_addr = g_card_phy_addr[id]; + + /* Copy the dev_info to user mode */ + if (copy_to_user(nt_msg->out_buf, &card_func_info, + sizeof(card_func_info))) { + pr_err("Copy dev_info to user failed\n"); + return -EFAULT; + } + + return 0; +} + +static bool is_mgmt_cmd_support(void *hwdev, unsigned int mod, u32 up_api_type) +{ + if (FUNC_SUPPORT_MGMT(hwdev)) { + if (up_api_type == API_CLP) { + if (!hifc_is_hwdev_mod_inited + (hwdev, HIFC_HWDEV_CLP_INITED)) { + pr_err("CLP has not been initialized\n"); + return false; + } + } else if (!hifc_is_hwdev_mod_inited + (hwdev, HIFC_HWDEV_MGMT_INITED)) { + pr_err("MGMT has not been initialized\n"); + return
false; + } + } else if (!hifc_is_hwdev_mod_inited + (hwdev, HIFC_HWDEV_MBOX_INITED)) { + pr_err("MBOX have not initialized\n"); + return false; + } + + return true; +} + +static bool is_hwdev_cmd_support(unsigned int mod, + char *ifname, u32 up_api_type) +{ + void *hwdev; + + hwdev = hifc_get_hwdev_by_ifname(ifname); + if (!hwdev) { + pr_err("Can not get the device %s correctly\n", ifname); + return false; + } + + switch (mod) { + case SEND_TO_UP: + case SEND_TO_SM: + return is_mgmt_cmd_support(hwdev, mod, up_api_type); + case SEND_TO_UCODE: + if (!hifc_is_hwdev_mod_inited(hwdev, + HIFC_HWDEV_CMDQ_INITED)) { + pr_err("CMDQ have not initialized\n"); + return false; + } + break; + + default: + return false; + } + + return true; +} + +static bool nictool_k_is_cmd_support(unsigned int mod, + char *ifname, u32 up_api_type) +{ + enum hifc_init_state init_state = + hifc_get_init_state_by_ifname(ifname); + + if (init_state == HIFC_INIT_STATE_NONE) + return false; + + if (mod == HIFCADM_FC_DRIVER) { + if (init_state < HIFC_INIT_STATE_ALL_INITED) { + pr_err("HIFC driver have not initialized\n"); + return false; + } + + return true; + } else if (mod >= SEND_TO_UCODE && mod <= SEND_TO_SM) { + return is_hwdev_cmd_support(mod, ifname, up_api_type); + } else if (mod == SEND_TO_HW_DRIVER) { + if (init_state < HIFC_INIT_STATE_HWDEV_INITED) { + pr_err("Hwdev have not initialized\n"); + return false; + } + + return true; + } + + return false; +} + +static int alloc_tmp_buf(void *hwdev, struct msg_module *nt_msg, u32 in_size, + void **buf_in, u32 out_size, void **buf_out) +{ + int ret; + + ret = alloc_buff_in(hwdev, nt_msg, in_size, buf_in); + if (ret) { + pr_err("Alloc tool cmd buff in failed\n"); + return ret; + } + + ret = alloc_buff_out(hwdev, nt_msg, out_size, buf_out); + if (ret) { + pr_err("Alloc tool cmd buff out failed\n"); + goto out_free_buf_in; + } + + return 0; + +out_free_buf_in: + free_buff_in(hwdev, nt_msg, *buf_in); + + return ret; +} + +static void free_tmp_buf(void *hwdev, struct msg_module *nt_msg, + void *buf_in, void *buf_out) +{ + free_buff_out(hwdev, nt_msg, buf_out); + free_buff_in(hwdev, nt_msg, buf_in); +} + +static int get_all_chip_id_cmd(struct msg_module *nt_msg) +{ + struct nic_card_id card_id; + + hifc_get_all_chip_id((void *)&card_id); + + if (copy_to_user(nt_msg->out_buf, &card_id, sizeof(card_id))) { + pr_err("Copy chip id to user failed\n"); + return -EFAULT; + } + + return 0; +} + +static bool __is_pcidev_match_dev_name(const char *ifname, + struct hifc_pcidev *dev) +{ + if (!strncmp(dev->uld_dev_name, ifname, IFNAMSIZ)) + return true; + + if ((dev->uld_dev) && (strlen(ifname) == 0)) + return true; + + return false; +} + +struct hifc_pcidev *hifc_get_pcidev_by_dev_name(char *ifname) +{ + struct card_node *chip_node; + struct hifc_pcidev *dev; + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (test_bit(HIFC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + if (__is_pcidev_match_dev_name(ifname, dev)) { + lld_dev_put(); + return dev; + } + } + } + lld_dev_put(); + + return NULL; +} + +static void *get_support_uld_dev(struct msg_module *nt_msg) +{ + struct hifc_pcidev *dev; + + dev = hifc_get_pcidev_by_dev_name(nt_msg->device_name); + + if (dev) + return dev->uld_dev; + + return NULL; +} + +static int get_service_drv_version(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, + u32 *out_size) +{ + enum hifc_service_type type; + int ret = 0; + + type = 
nt_msg->module - SEND_TO_SM; + if (type != SERVICE_T_FC) { + pr_err("err cmd type: %d\n", type); + return ret; + } + *out_size = sizeof(struct drv_version_info); + + ret = hifc_adm(NULL, nt_msg->msg_formate, buf_in, in_size, + buf_out, out_size); + if (ret) + return ret; + + if (copy_to_user(nt_msg->out_buf, buf_out, *out_size)) + return -EFAULT; + + return ret; +} + +int send_to_service_driver(struct msg_module *nt_msg, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + enum hifc_service_type type; + void *uld_dev; + int ret = -EINVAL; + + type = nt_msg->module - SEND_TO_SM; + + if (type == SERVICE_T_FC) { + uld_dev = get_support_uld_dev(nt_msg); + if (!uld_dev) + return -EINVAL; + ret = hifc_adm(uld_dev, + nt_msg->msg_formate, + buf_in, in_size, buf_out, + out_size); + } else { + pr_err("Ioctl input module id: %d is incorrectly\n", + nt_msg->module); + } + + return ret; +} + +static int nictool_exec_cmd(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, + u32 *out_size) +{ + int ret; + + switch (nt_msg->module) { + case SEND_TO_HW_DRIVER: + ret = send_to_hw_driver(hwdev, nt_msg, buf_in, + in_size, buf_out, out_size); + break; + case SEND_TO_UP: + ret = send_to_up(hwdev, nt_msg, buf_in, + in_size, buf_out, out_size); + break; + case SEND_TO_UCODE: + ret = send_to_ucode(hwdev, nt_msg, buf_in, + in_size, buf_out, out_size); + break; + case SEND_TO_SM: + ret = send_to_sm(hwdev, nt_msg, buf_in, + in_size, buf_out, out_size); + break; + default: + ret = send_to_service_driver(nt_msg, buf_in, in_size, buf_out, + out_size); + break; + } + + return ret; +} + +static bool hifc_is_special_handling_cmd(struct msg_module *nt_msg, int *ret) +{ + bool handled = true; + + if (nt_msg->module != SEND_TO_HW_DRIVER) + return false; + + switch (nt_msg->msg_formate) { + case GET_CHIP_ID: + *ret = get_all_chip_id_cmd(nt_msg); + break; + case GET_CHIP_INFO: + *ret = get_card_func_info(nt_msg->device_name, nt_msg); + break; + default: + handled = false; + break; + } + + return handled; +} + +static int do_nictool_ioctl_cmd(void *hwdev, struct msg_module *nt_msg) +{ + void *buf_out = NULL; + void *buf_in = NULL; + u32 out_size_expect; + u32 out_size, in_size; + int ret = 0; + + out_size_expect = nt_msg->len_info.out_buff_len; + in_size = nt_msg->len_info.in_buff_len; + + ret = alloc_tmp_buf(hwdev, nt_msg, in_size, + &buf_in, out_size_expect, &buf_out); + if (ret) { + pr_err("Alloc tmp buff failed\n"); + return ret; + } + + out_size = out_size_expect; + + if ((nt_msg->msg_formate == GET_DRV_VERSION) && + (nt_msg->module == HIFCADM_FC_DRIVER)) { + ret = get_service_drv_version(hwdev, nt_msg, buf_in, + in_size, buf_out, &out_size); + goto out_free_buf; + } + + ret = nictool_exec_cmd(hwdev, nt_msg, buf_in, + in_size, buf_out, &out_size); + if (ret) { + pr_err("nictool_exec_cmd failed, mod:%d msg_formate:%d\n", + nt_msg->module, nt_msg->msg_formate); + goto out_free_buf; + } + + if (out_size_expect && buf_out) { + ret = copy_buf_out_to_user(nt_msg, out_size_expect, buf_out); + if (ret) + pr_err("Copy information to user failed\n"); + } +out_free_buf: + free_tmp_buf(hwdev, nt_msg, buf_in, buf_out); + + return ret; +} + +static long nictool_k_unlocked_ioctl(struct file *pfile, + unsigned int cmd, unsigned long arg) +{ + void *hwdev; + struct msg_module nt_msg; + int ret = 0; + + memset(&nt_msg, 0, sizeof(nt_msg)); + + if (copy_from_user(&nt_msg, (void *)arg, sizeof(nt_msg))) { + pr_err("Copy information from user failed\n"); + return -EFAULT; + } + + /* end with '\0' */ + 
nt_msg.device_name[IFNAMSIZ - 1] = '\0'; + + hifc_tool_cnt_inc(); + if (hifc_is_special_handling_cmd(&nt_msg, &ret)) + goto out_free_lock; + + if (nt_msg.module == HIFCADM_FC_DRIVER && + nt_msg.msg_formate == GET_CHIP_ID) + get_fc_devname(nt_msg.device_name); + + if (!nictool_k_is_cmd_support(nt_msg.module, nt_msg.device_name, + nt_msg.up_cmd.up_db.up_api_type)) { + ret = -EFAULT; + goto out_free_lock; + } + + /* get the netdevice */ + hwdev = hifc_get_hwdev_by_ifname(nt_msg.device_name); + if (!hwdev) { + pr_err("Can not get the device %s correctly\n", + nt_msg.device_name); + ret = -ENODEV; + goto out_free_lock; + } + + ret = do_nictool_ioctl_cmd(hwdev, &nt_msg); + +out_free_lock: + hifc_tool_cnt_dec(); + + return (long)ret; +} + +static int nictool_k_open(struct inode *pnode, struct file *pfile) +{ + return 0; +} + +static ssize_t nictool_k_read(struct file *pfile, char __user *ubuf, + size_t size, loff_t *ppos) +{ + return 0; +} + +static ssize_t nictool_k_write(struct file *pfile, const char __user *ubuf, + size_t size, loff_t *ppos) +{ + return 0; +} + +static const struct file_operations fifo_operations = { + .owner = THIS_MODULE, + .open = nictool_k_open, + .read = nictool_k_read, + .write = nictool_k_write, + .unlocked_ioctl = nictool_k_unlocked_ioctl, + .mmap = hifc_mem_mmap, +}; + +int if_nictool_exist(void) +{ + struct file *fp = NULL; + int exist = 0; + + fp = filp_open(HIADM_DEV_PATH, O_RDONLY, 0); + if (IS_ERR(fp)) { + exist = 0; + } else { + (void)filp_close(fp, NULL); + exist = 1; + } + + return exist; +} + +/** + * nictool_k_init - initialize the hw interface + */ +int nictool_k_init(void) +{ + int ret; + struct device *pdevice; + + if (g_nictool_init_flag) { + g_nictool_ref_cnt++; + /* already initialized */ + return 0; + } + + if (if_nictool_exist()) { + pr_err("Nictool device exists\n"); + return 0; + } + + /* Device ID: primary device ID (12bit) | + * secondary device number (20bit) + */ + g_dev_id = MKDEV(MAJOR_DEV_NUM, 0); + + /* Static device registration number */ + ret = register_chrdev_region(g_dev_id, 1, HIADM_DEV_NAME); + if (ret < 0) { + ret = alloc_chrdev_region(&g_dev_id, 0, 1, HIADM_DEV_NAME); + if (ret < 0) { + pr_err("Register nictool_dev fail(0x%x)\n", ret); + return ret; + } + } + + /* Create equipment */ + /*lint -save -e160*/ + g_nictool_class = class_create(THIS_MODULE, HIADM_DEV_CLASS); + /*lint -restore*/ + if (IS_ERR(g_nictool_class)) { + pr_err("Create nictool_class fail\n"); + ret = -EFAULT; + goto class_create_err; + } + + /* Initializing the character device */ + cdev_init(&g_nictool_cdev, &fifo_operations); + + /* Add devices to the operating system */ + ret = cdev_add(&g_nictool_cdev, g_dev_id, 1); + if (ret < 0) { + pr_err("Add nictool_dev to operating system fail(0x%x)\n", ret); + goto cdev_add_err; + } + + /* Export device information to user space + * (/sys/class/class name/device name) + */ + pdevice = device_create(g_nictool_class, NULL, + g_dev_id, NULL, HIADM_DEV_NAME); + if (IS_ERR(pdevice)) { + pr_err("Export nictool device information to user space fail\n"); + ret = -EFAULT; + goto device_create_err; + } + + g_nictool_init_flag = 1; + g_nictool_ref_cnt = 1; + + pr_info("Register nictool_dev to system succeed\n"); + + return 0; + +device_create_err: + cdev_del(&g_nictool_cdev); + +cdev_add_err: + class_destroy(g_nictool_class); + +class_create_err: + g_nictool_class = NULL; + unregister_chrdev_region(g_dev_id, 1); + + return ret; +} + +void nictool_k_uninit(void) +{ + if (g_nictool_init_flag) { + if ((--g_nictool_ref_cnt)) + 
return; + } + + g_nictool_init_flag = 0; + + if (!g_nictool_class || IS_ERR(g_nictool_class)) + return; + + cdev_del(&g_nictool_cdev); + device_destroy(g_nictool_class, g_dev_id); + class_destroy(g_nictool_class); + g_nictool_class = NULL; + + unregister_chrdev_region(g_dev_id, 1); + + pr_info("Unregister nictool_dev succeed\n"); +} diff --git a/drivers/scsi/huawei/hifc/hifc_tool.h b/drivers/scsi/huawei/hifc/hifc_tool.h new file mode 100644 index 000000000000..3aad6a4834a7 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_tool.h @@ -0,0 +1,332 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef HIFC_NICTOOL_H_ +#define HIFC_NICTOOL_H_ +#ifndef IFNAMSIZ +#define IFNAMSIZ 16 +#endif +/* completion timeout interval, unit is jiffies*/ +#define UP_COMP_TIME_OUT_VAL 10000U + +struct sm_in_st { + int node; + int id; + int instance; +}; + +struct sm_out_st { + u64 val1; + u64 val2; +}; + +struct up_log_msg_st { + u32 rd_len; + u32 addr; +}; + +struct csr_write_st { + u32 rd_len; + u32 addr; + u8 *data; +}; + +struct ipsurx_stats_info { + u32 addr; + u32 rd_cnt; +}; + +struct ucode_cmd_st { + union { + struct { + u32 comm_mod_type:8; + u32 ucode_cmd_type:4; + u32 cmdq_ack_type:3; + u32 ucode_imm:1; + u32 len:16; + } ucode_db; + u32 value; + }; +}; + +struct up_cmd_st { + union { + struct { + u32 comm_mod_type:8; + u32 chipif_cmd:8; + u32 up_api_type:16; + } up_db; + u32 value; + }; +}; + +struct _dcb_data { + u8 wr_flag; + u8 dcb_en; + u8 err; + u8 rsvd; +}; + +union _dcb_ctl { + struct _dcb_data dcb_data; + u32 data; +}; + +struct _pfc_data { + u8 pfc_en; + u8 pfc_priority; + u8 num_of_tc; + u8 err; +}; + +union _pfc { + struct _pfc_data pfc_data; + u32 data; +}; + +union _flag_com { + struct _ets_flag { + u8 flag_ets_enable:1; + u8 flag_ets_percent:1; + u8 flag_ets_cos:1; + u8 flag_ets_strict:1; + u8 rev:4; + } ets_flag; + u8 data; +}; + +struct _ets { + u8 ets_en; + u8 err; + u8 strict; + u8 tc[8]; + u8 ets_percent[8]; + union _flag_com flag_com; +}; + +#define API_CMD 0x1 +#define API_CHAIN 0x2 +#define API_CLP 0x3 + +struct msg_module { + char device_name[IFNAMSIZ]; + unsigned int module; + union { + u32 msg_formate; + struct ucode_cmd_st ucode_cmd; + struct up_cmd_st up_cmd; + }; + + struct { + u32 in_buff_len; + u32 out_buff_len; + } len_info; + u32 res; + void *in_buff; + void *out_buf; +}; + +#define MAX_VER_INFO_LEN 128 +struct drv_version_info { + char ver[MAX_VER_INFO_LEN]; +}; + +struct chip_fault_stats { + int offset; + u8 chip_faults[MAX_DRV_BUF_SIZE]; +}; + +struct hifc_wqe_info { + int q_id; + void *slq_handle; + unsigned int wqe_id; +}; + +struct hifc_tx_hw_page { + u64 phy_addr; + u64 *map_addr; +}; + +struct hifc_dbg_sq_info { + u16 q_id; + u16 pi; + u16 ci;/* sw_ci */ + u16 fi;/* hw_ci */ + + u32 q_depth; + u16 pi_reverse; + u16 weqbb_size; + + u8 priority; + u16 *ci_addr; + u64 cla_addr; + + void *slq_handle; + + struct hifc_tx_hw_page direct_wqe; + struct hifc_tx_hw_page db_addr; + u32 pg_idx; + + u32 glb_sq_id; +}; + +struct hifc_dbg_rq_info { + u16 q_id; + u16 glb_rq_id; + u16 hw_pi; + u16 ci; /* sw_ci */ + u16 sw_pi; + u16 wqebb_size; + u16 q_depth; + u16 buf_len; + + void *slq_handle; + u64 ci_wqe_page_addr; + u64 ci_cla_tbl_addr; + + u16 msix_idx; + u32 msix_vector; +}; + +#ifndef BUSINFO_LEN +#define BUSINFO_LEN (32) +#endif +struct pf_info { + char name[IFNAMSIZ]; + char bus_info[BUSINFO_LEN]; + u32 pf_type; +}; + +#ifndef MAX_SIZE +#define MAX_SIZE (16) +#endif 
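+/* Per-card PF inventory reported to user space: pf_num valid entries in pf[]. */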
+struct card_info { + struct pf_info pf[MAX_SIZE]; + u32 pf_num; +}; + +struct nic_card_id { + u32 id[MAX_SIZE]; + u32 num; +}; + +struct func_pdev_info { + u64 bar0_phy_addr; + u64 bar0_size; + u64 rsvd1[4]; +}; + +struct hifc_card_func_info { + u32 num_pf; + u32 rsvd0; + u64 usr_api_phy_addr; + struct func_pdev_info pdev_info[MAX_SIZE]; +}; + +#ifndef NIC_UP_CMD_UPDATE_FW +#define NIC_UP_CMD_UPDATE_FW (114) +#endif + +#ifndef MAX_CARD_NUM +#define MAX_CARD_NUM (64) +#endif +extern void *g_card_node_array[MAX_CARD_NUM]; +extern void *g_card_vir_addr[MAX_CARD_NUM]; +extern u64 g_card_phy_addr[MAX_CARD_NUM]; +extern struct mutex g_addr_lock; +extern int card_id; + +struct hifc_nic_loop_mode { + u32 loop_mode; + u32 loop_ctrl; +}; + +struct hifc_nic_poll_weight { + int poll_weight; +}; + +enum hifc_homologues_state { + HIFC_HOMOLOGUES_OFF = 0, + HIFC_HOMOLOGUES_ON = 1, +}; + +struct hifc_homologues { + enum hifc_homologues_state homo_state; +}; + +struct hifc_pf_info { + u32 isvalid; + u32 pf_id; +}; + +enum module_name { + SEND_TO_NIC_DRIVER = 1, + SEND_TO_HW_DRIVER, + SEND_TO_UCODE, + SEND_TO_UP, + SEND_TO_SM, + HIFCADM_FC_DRIVER = 10, +}; + +enum driver_cmd_type { + FUNC_TYPE = 12, + GET_FUNC_IDX, + GET_DRV_VERSION = 16, + GET_HW_STATS = 18, + CLEAR_HW_STATS, + GET_CHIP_FAULT_STATS = 21, + GET_CHIP_ID = 25, + GET_SINGLE_CARD_INFO, + GET_FIRMWARE_ACTIVE_STATUS, + GET_DEVICE_ID = 29, + IS_DRV_IN_VM = 44, + GET_CHIP_INFO = 48, + GET_PF_ID = 52, + PORT_ID = 0x42 +}; + +enum api_chain_cmd_type { + API_CSR_READ, + API_CSR_WRITE +}; + +enum sm_cmd_type { + SM_CTR_RD32 = 1, + SM_CTR_RD64_PAIR, + SM_CTR_RD64 +}; + +int nictool_k_init(void); +void nictool_k_uninit(void); + +int send_to_hw_driver(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size); + +int send_to_sm(void *hwdev, struct msg_module *nt_msg, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size); + +int send_to_up(void *hwdev, struct msg_module *nt_msg, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size); + +int send_to_ucode(void *hwdev, struct msg_module *nt_msg, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size); + +void get_fc_devname(char *devname); +void *hifc_get_hwdev_by_ifname(char *ifname); +enum hifc_init_state hifc_get_init_state_by_ifname(char *ifname); +void hifc_get_all_chip_id(void *id_info); +void hifc_tool_cnt_dec(void); +void hifc_tool_cnt_inc(void); +int hifc_get_device_id(void *hwdev, u16 *dev_id); +int hifc_get_pf_id(void *hwdev, u32 port_id, u32 *pf_id, u32 *isvalid); +bool hifc_is_valid_bar_addr(u64 offset); +void hifc_get_card_info(void *hwdev, void *bufin); +struct hifc_pcidev *hifc_get_pcidev_by_dev_name(char *ifname); +void hifc_get_card_func_info_by_card_name( + const char *chip_name, struct hifc_card_func_info *card_func); + +#endif + diff --git a/drivers/scsi/huawei/hifc/hifc_tool_hw.c b/drivers/scsi/huawei/hifc/hifc_tool_hw.c new file mode 100644 index 000000000000..f5e6c6c887e1 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_tool_hw.c @@ -0,0 +1,1010 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/cdev.h> +#include <linux/device.h> +#include <linux/netdevice.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <net/sock.h> + +#include "hifc_knl_adp.h" +#include "hifc_hw.h" +#include "hifc_hwdev.h" +#include "hifc_hwif.h" +#include "hifc_api_cmd.h" +#include 
"hifc_mgmt.h" +#include "hifc_cfg.h" +#include "hifc_lld.h" +#include "hifc_sml.h" +#include "hifc_tool.h" + +static atomic_t tool_used_cnt; + +typedef int (*hw_driv_module)(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size); + +struct hw_drv_module_handle { + enum driver_cmd_type driv_cmd_name; + hw_driv_module driv_func; +}; + +u8 hifc_physical_port_id(void *hwdev) +{ + struct hifc_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting physical port id\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.port_id; +} + +int hifc_clp_to_mgmt(void *hwdev, enum hifc_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hifc_hwdev *dev = hwdev; + int err; + + if (!dev) + return -EINVAL; + + if (!dev->chip_present_flag) + return -EPERM; + + if (!hifc_is_hwdev_mod_inited(hwdev, HIFC_HWDEV_CLP_INITED)) + return -EPERM; + + err = hifc_pf_clp_to_mgmt(dev, mod, cmd, buf_in, + in_size, buf_out, out_size); + + return err; +} + +static int get_func_type(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + u16 func_typ; + + func_typ = hifc_func_type(hwdev); + if (!buf_out || *out_size != sizeof(u16)) { + pr_err("Unexpect out buf size from user :%d, expect: %lu\n", + *out_size, sizeof(u16)); + return -EFAULT; + } + *(u16 *)buf_out = func_typ; + return 0; +} + +static int get_func_id(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + u16 func_id; + + if (!buf_out || *out_size != sizeof(u16)) { + pr_err("Unexpect out buf size from user :%d, expect: %lu\n", + *out_size, sizeof(u16)); + return -EFAULT; + } + + func_id = hifc_global_func_id_hw(hwdev); + *(u16 *)buf_out = func_id; + *out_size = sizeof(u16); + return 0; +} + +static int get_drv_version(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + struct drv_version_info *ver_info; + char ver_str[MAX_VER_INFO_LEN] = {0}; + + if (*out_size != sizeof(*ver_info)) { + pr_err("Unexpect out buf size from user :%d, expect: %lu\n", + *out_size, sizeof(*ver_info)); + return -EFAULT; + } + snprintf(ver_str, sizeof(ver_str), "%s %s", + HIFC_DRV_VERSION, __TIME_STR__); + ver_info = (struct drv_version_info *)buf_out; + memcpy(ver_info->ver, ver_str, sizeof(ver_str)); + + return 0; +} + +static int clear_hw_stats(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + return 0; +} + +static int get_hw_stats(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + return 0; +} + +static void hifc_get_chip_fault_stats(const void *hwdev, + u8 *chip_fault_stats, int offset) +{ + int copy_len = offset + MAX_DRV_BUF_SIZE - HIFC_CHIP_FAULT_SIZE; + + if (offset < 0 || offset > HIFC_CHIP_FAULT_SIZE) { + pr_err("Invalid chip offset value: %d\n", + offset); + return; + } + + if (offset + MAX_DRV_BUF_SIZE <= HIFC_CHIP_FAULT_SIZE) + memcpy(chip_fault_stats, + ((struct hifc_hwdev *)hwdev)->chip_fault_stats + offset, + MAX_DRV_BUF_SIZE); + else + memcpy(chip_fault_stats, + ((struct hifc_hwdev *)hwdev)->chip_fault_stats + offset, + copy_len); +} + +static int get_chip_faults_stats(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + int offset = 0; + struct chip_fault_stats *fault_info; + + if (!buf_in || !buf_out || *out_size != sizeof(*fault_info) || + in_size != sizeof(*fault_info)) { + pr_err("Unexpect out buf size from user :%d, expect: %lu\n", + *out_size, sizeof(*fault_info)); + return -EFAULT; + } + fault_info = (struct chip_fault_stats *)buf_in; + 
offset = fault_info->offset; + fault_info = (struct chip_fault_stats *)buf_out; + hifc_get_chip_fault_stats(hwdev, fault_info->chip_faults, offset); + + return 0; +} + +static int get_chip_id_test(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + return 0; +} + +static int get_single_card_info(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + if (!buf_in || !buf_out || in_size != sizeof(struct card_info) || + *out_size != sizeof(struct card_info)) { + pr_err("Unexpect out buf size from user :%d, expect: %lu\n", + *out_size, sizeof(struct card_info)); + return -EFAULT; + } + + hifc_get_card_info(hwdev, buf_out); + *out_size = in_size; + return 0; +} + +#define GET_FIRMWARE_ACTIVE_STATUS_TIMEOUT 30 +static int get_firmware_active_status(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + u32 loop_cnt = 0; + + while (loop_cnt < GET_FIRMWARE_ACTIVE_STATUS_TIMEOUT) { + if (!hifc_get_mgmt_channel_status(hwdev)) + return 0; + + msleep(1000); + loop_cnt++; + } + if (loop_cnt == GET_FIRMWARE_ACTIVE_STATUS_TIMEOUT) + return -ETIMEDOUT; + + return 0; +} + +static int get_device_id(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + u16 dev_id; + int err; + + if (!buf_out || !buf_in || *out_size != sizeof(u16) || + in_size != sizeof(u16)) { + pr_err("Unexpect out buf size from user :%d, expect: %lu\n", + *out_size, sizeof(u16)); + return -EFAULT; + } + + err = hifc_get_device_id(hwdev, &dev_id); + if (err) + return err; + + *((u32 *)buf_out) = dev_id; + *out_size = in_size; + + return 0; +} + +bool hifc_is_in_host(void) +{ + struct card_node *chip_node; + struct hifc_pcidev *dev; + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (test_bit(HIFC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + if (dev->init_state > HIFC_INIT_STATE_PCI_INITED) { + lld_dev_put(); + return true; + } + } + } + lld_dev_put(); + + return false; +} + +static int is_driver_in_vm(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + bool in_host; + + if (!buf_out || (*out_size != sizeof(u8))) + return -EINVAL; + + in_host = hifc_is_in_host(); + if (in_host) + *((u8 *)buf_out) = 0; + else + *((u8 *)buf_out) = 1; + + return 0; +} + +static int get_pf_id(void *hwdev, void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + struct hifc_pf_info *pf_info; + u32 port_id = 0; + int err; + + if (!buf_out || (*out_size != sizeof(*pf_info)) || + !buf_in || in_size != sizeof(u32)) + return -EINVAL; + + port_id = *((u32 *)buf_in); + pf_info = (struct hifc_pf_info *)buf_out; + err = hifc_get_pf_id(hwdev, port_id, &pf_info->pf_id, + &pf_info->isvalid); + if (err) + return err; + + *out_size = sizeof(*pf_info); + + return 0; +} + +static struct hw_drv_module_handle hw_driv_module_cmd_handle[] = { + {FUNC_TYPE, get_func_type}, + {GET_FUNC_IDX, get_func_id}, + {GET_DRV_VERSION, get_drv_version}, + {GET_HW_STATS, get_hw_stats}, + {CLEAR_HW_STATS, clear_hw_stats}, + {GET_CHIP_FAULT_STATS, get_chip_faults_stats}, + {GET_CHIP_ID, get_chip_id_test}, + {GET_SINGLE_CARD_INFO, get_single_card_info}, + {GET_FIRMWARE_ACTIVE_STATUS, get_firmware_active_status}, + {GET_DEVICE_ID, get_device_id}, + {IS_DRV_IN_VM, is_driver_in_vm}, + {GET_PF_ID, get_pf_id}, +}; + +int send_to_hw_driver(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + int index, num_cmds = 
sizeof(hw_driv_module_cmd_handle) / + sizeof(hw_driv_module_cmd_handle[0]); + enum driver_cmd_type cmd_type; + int err = 0; + + if (!nt_msg) { + pr_err("Input param invalid!\n"); + return -EINVAL; + } + cmd_type = (enum driver_cmd_type)(nt_msg->msg_formate); + for (index = 0; index < num_cmds; index++) { + if (cmd_type == + hw_driv_module_cmd_handle[index].driv_cmd_name) { + err = hw_driv_module_cmd_handle[index].driv_func + (hwdev, buf_in, + in_size, buf_out, out_size); + break; + } + } + + if (index == num_cmds) + return -EINVAL; + + return err; +} + +typedef int (*sm_module)(void *hwdev, u32 id, u8 instance, + u8 node, struct sm_out_st *buf_out); + +static int sm_rd32(void *hwdev, u32 id, u8 instance, + u8 node, struct sm_out_st *buf_out) +{ + u32 val1; + int ret; + + ret = hifc_sm_ctr_rd32(hwdev, node, instance, id, &val1); + if (ret) { + pr_err("Get sm ctr information (32 bits)failed!\n"); + val1 = 0xffffffff; + } + + buf_out->val1 = val1; + + return ret; +} + +static int sm_rd64_pair(void *hwdev, u32 id, u8 instance, + u8 node, struct sm_out_st *buf_out) +{ + u64 val1 = 0, val2 = 0; + int ret; + + ret = hifc_sm_ctr_rd64_pair(hwdev, node, instance, id, &val1, &val2); + if (ret) { + pr_err("Get sm ctr information (64 bits pair)failed!\n"); + val1 = 0xffffffff; + } + + buf_out->val1 = val1; + buf_out->val2 = val2; + + return ret; +} + +static int sm_rd64(void *hwdev, u32 id, u8 instance, + u8 node, struct sm_out_st *buf_out) +{ + u64 val1; + int ret; + + ret = hifc_sm_ctr_rd64(hwdev, node, instance, id, &val1); + if (ret) { + pr_err("Get sm ctr information (64 bits)failed!\n"); + val1 = 0xffffffff; + } + buf_out->val1 = val1; + + return ret; +} + +struct sm_module_handle { + enum sm_cmd_type sm_cmd_name; + sm_module sm_func; +}; + +struct sm_module_handle sm_module_cmd_handle[] = { + {SM_CTR_RD32, sm_rd32}, + {SM_CTR_RD64_PAIR, sm_rd64_pair}, + {SM_CTR_RD64, sm_rd64} +}; + +int send_to_sm(void *hwdev, struct msg_module *nt_msg, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + struct sm_in_st *sm_in = buf_in; + struct sm_out_st *sm_out = buf_out; + u32 msg_formate; + int index, num_cmds = sizeof(sm_module_cmd_handle) / + sizeof(sm_module_cmd_handle[0]); + int ret = 0; + + if ((!nt_msg) || (!buf_in) || (!buf_out) || + (in_size != sizeof(*sm_in)) || + (*out_size != sizeof(*sm_out))) { + pr_err("Input param invalid!\n"); + return -EINVAL; + } + + msg_formate = nt_msg->msg_formate; + for (index = 0; index < num_cmds; index++) { + if (msg_formate == sm_module_cmd_handle[index].sm_cmd_name) + ret = sm_module_cmd_handle[index].sm_func(hwdev, + (u32)sm_in->id, + (u8)sm_in->instance, + (u8)sm_in->node, sm_out); + } + + if (ret) + pr_err("Get sm information fail!\n"); + + *out_size = sizeof(struct sm_out_st); + + return ret; +} + +static u32 get_up_timeout_val(enum hifc_mod_type mod, u8 cmd) +{ +#define UP_UPDATEFW_TIME_OUT_VAL 20000U + if (mod == HIFC_MOD_L2NIC && cmd == NIC_UP_CMD_UPDATE_FW) + return UP_UPDATEFW_TIME_OUT_VAL; + else + return UP_COMP_TIME_OUT_VAL; +} + +static int api_csr_write(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, + u32 *out_size) +{ + struct csr_write_st *csr_write_msg = (struct csr_write_st *)buf_in; + int ret = 0; + u32 rd_len; + u32 rd_addr; + u32 rd_cnt = 0; + u32 offset = 0; + u8 node_id; + u32 i; + u8 *data; + + if (!buf_in || in_size != sizeof(*csr_write_msg)) + return -EINVAL; + + rd_len = csr_write_msg->rd_len; + rd_addr = csr_write_msg->addr; + node_id = (u8)nt_msg->up_cmd.up_db.comm_mod_type; + + if (rd_len 
% 4) { + pr_err("Csr length must be a multiple of 4\n"); + return -EFAULT; + } + + rd_cnt = rd_len / 4; + data = kzalloc(rd_len, GFP_KERNEL); + if (!data) { + pr_err("No more memory\n"); + return -EFAULT; + } + if (copy_from_user(data, (void *)csr_write_msg->data, rd_len)) { + pr_err("Copy information from user failed\n"); + kfree(data); + return -EFAULT; + } + + for (i = 0; i < rd_cnt; i++) { + ret = hifc_api_csr_wr32(hwdev, node_id, + rd_addr + offset, + *((u32 *)(data + offset))); + if (ret) { + pr_err("Csr wr fail, ret: %d, node_id: %d, csr addr: 0x%08x\n", + ret, rd_addr + offset, node_id); + kfree(data); + return ret; + } + offset += 4; + } + + *out_size = 0; + kfree(data); + return ret; +} + +static int api_csr_read(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + struct up_log_msg_st *up_log_msg = (struct up_log_msg_st *)buf_in; + int ret = 0; + u32 rd_len; + u32 rd_addr; + u32 rd_cnt = 0; + u32 offset = 0; + u8 node_id; + u32 i; + + if (!buf_in || !buf_out || in_size != sizeof(*up_log_msg) || + *out_size != up_log_msg->rd_len) + return -EINVAL; + + rd_len = up_log_msg->rd_len; + rd_addr = up_log_msg->addr; + node_id = (u8)nt_msg->up_cmd.up_db.comm_mod_type; + + rd_cnt = rd_len / 4; + + if (rd_len % 4) + rd_cnt++; + + for (i = 0; i < rd_cnt; i++) { + ret = hifc_api_csr_rd32(hwdev, node_id, + rd_addr + offset, + (u32 *)(((u8 *)buf_out) + offset)); + if (ret) { + pr_err("Csr rd fail, err: %d, node_id: %d, csr addr: 0x%08x\n", + ret, node_id, rd_addr + offset); + return ret; + } + offset += 4; + } + *out_size = rd_len; + + return ret; +} + +int send_to_up(void *hwdev, struct msg_module *nt_msg, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + int ret = 0; + + if ((!nt_msg) || (!hwdev) || (!buf_in) || (!buf_out)) { + pr_err("Input param invalid!\n"); + return -EINVAL; + } + + if ((nt_msg->up_cmd.up_db.up_api_type == API_CMD) || + (nt_msg->up_cmd.up_db.up_api_type == API_CLP)) { + enum hifc_mod_type mod; + u8 cmd; + u32 timeout; + + mod = (enum hifc_mod_type)nt_msg->up_cmd.up_db.comm_mod_type; + cmd = nt_msg->up_cmd.up_db.chipif_cmd; + + timeout = get_up_timeout_val(mod, cmd); + + if (nt_msg->up_cmd.up_db.up_api_type == API_CMD) + ret = hifc_msg_to_mgmt_sync(hwdev, mod, cmd, + buf_in, (u16)in_size, + buf_out, (u16 *)out_size, + timeout); + else + ret = hifc_clp_to_mgmt(hwdev, mod, cmd, + buf_in, (u16)in_size, + buf_out, (u16 *)out_size); + if (ret) { + pr_err("Message to mgmt cpu return fail, mod: %d, cmd: %d\n", + mod, cmd); + return ret; + } + + } else if (nt_msg->up_cmd.up_db.up_api_type == API_CHAIN) { + if (nt_msg->up_cmd.up_db.chipif_cmd == API_CSR_WRITE) { + ret = api_csr_write(hwdev, nt_msg, buf_in, + in_size, buf_out, out_size); + return ret; + } + + ret = api_csr_read(hwdev, nt_msg, buf_in, + in_size, buf_out, out_size); + } + + return ret; +} + +int send_to_ucode(void *hwdev, struct msg_module *nt_msg, void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + int ret = 0; + + if ((!nt_msg) || (!hwdev) || (!buf_in)) { + pr_err("Input param invalid!\n"); + return -EINVAL; + } + + if (nt_msg->ucode_cmd.ucode_db.ucode_imm) { + ret = hifc_cmdq_direct_resp + (hwdev, nt_msg->ucode_cmd.ucode_db.cmdq_ack_type, + nt_msg->ucode_cmd.ucode_db.comm_mod_type, + nt_msg->ucode_cmd.ucode_db.ucode_cmd_type, + buf_in, buf_out, 0); + if (ret) + pr_err("Send direct cmdq err: %d!\n", ret); + } else { + ret = hifc_cmdq_detail_resp + (hwdev, nt_msg->ucode_cmd.ucode_db.cmdq_ack_type, + nt_msg->ucode_cmd.ucode_db.comm_mod_type, 
+ nt_msg->ucode_cmd.ucode_db.ucode_cmd_type, + buf_in, buf_out, 0); + if (ret) + pr_err("Send detail cmdq err: %d!\n", ret); + } + + return ret; +} + +void hifc_tool_cnt_inc(void) +{ + atomic_inc(&tool_used_cnt); +} + +void hifc_tool_cnt_dec(void) +{ + atomic_dec(&tool_used_cnt); +} + +static bool __is_pcidev_match_chip_name(const char *ifname, + struct hifc_pcidev *dev, + struct card_node *chip_node, + enum func_type type) +{ + if (!strncmp(chip_node->chip_name, ifname, IFNAMSIZ)) { + if (type == TYPE_UNKNOWN) { + if (dev->init_state < HIFC_INIT_STATE_HW_PART_INITED) + return false; + } else { + if (dev->init_state >= + HIFC_INIT_STATE_HW_PART_INITED && + hifc_func_type(dev->hwdev) != type) + return false; + } + + return true; + } + + return false; +} + +static struct hifc_pcidev *_get_pcidev_by_chip_name(char *ifname, + enum func_type type) +{ + struct card_node *chip_node; + struct hifc_pcidev *dev; + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (test_bit(HIFC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + if (__is_pcidev_match_chip_name(ifname, dev, chip_node, + type)) { + lld_dev_put(); + return dev; + } + } + } + + lld_dev_put(); + + return NULL; +} + +static struct hifc_pcidev *hifc_get_pcidev_by_chip_name(char *ifname) +{ + struct hifc_pcidev *dev, *dev_hw_init; + + /* find hw init device first */ + dev_hw_init = _get_pcidev_by_chip_name(ifname, TYPE_UNKNOWN); + if (dev_hw_init) { + if (hifc_func_type(dev_hw_init->hwdev) == TYPE_PPF) + return dev_hw_init; + } + + dev = _get_pcidev_by_chip_name(ifname, TYPE_PPF); + if (dev) { + if (dev_hw_init && (dev_hw_init->init_state >= dev->init_state)) + return dev_hw_init; + + return dev; + } + + dev = _get_pcidev_by_chip_name(ifname, TYPE_PF); + if (dev) { + if (dev_hw_init && (dev_hw_init->init_state >= dev->init_state)) + return dev_hw_init; + + return dev; + } + + return NULL; +} + +static struct hifc_pcidev *hifc_get_pcidev_by_ifname(char *ifname) +{ + struct hifc_pcidev *dev; + + /* support search hwdev by chip name, net device name, + * or fc device name + */ + /* Find pcidev by chip_name first */ + dev = hifc_get_pcidev_by_chip_name(ifname); + if (dev) + return dev; + + /* If ifname not a chip name, + * find pcidev by FC name or netdevice name + */ + return hifc_get_pcidev_by_dev_name(ifname); +} + +void *hifc_get_hwdev_by_ifname(char *ifname) +{ + struct hifc_pcidev *dev; + + if (!ifname) { + pr_err("Input param invalid!\n"); + return NULL; + } + + dev = hifc_get_pcidev_by_ifname(ifname); + if (dev) + return dev->hwdev; + + return NULL; +} + +enum hifc_init_state hifc_get_init_state_by_ifname(char *ifname) +{ + struct hifc_pcidev *dev; + + if (!ifname) { + pr_err("Input param invalid!\n"); + return HIFC_INIT_STATE_NONE; + } + dev = hifc_get_pcidev_by_ifname(ifname); + if (dev) + return dev->init_state; + + pr_err("Can not get device %s\n", ifname); + + return HIFC_INIT_STATE_NONE; +} + +void get_fc_devname(char *devname) +{ + struct card_node *chip_node; + struct hifc_pcidev *dev; + + if (!devname) { + pr_err("Input param invalid!\n"); + return; + } + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (test_bit(HIFC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + if (dev->init_state < HIFC_INIT_STATE_ALL_INITED) + continue; + + if (dev->uld_dev) { + strlcpy(devname, (char *)dev->uld_dev_name, + IFNAMSIZ); + lld_dev_put(); + return; + } + } + } + 
lld_dev_put(); +} + +void hifc_get_all_chip_id(void *id_info) +{ + struct nic_card_id *card_id = (struct nic_card_id *)id_info; + struct card_node *chip_node; + int i = 0; + int id, err; + + if (!card_id) { + pr_err("Input param invalid!\n"); + return; + } + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + err = sscanf(chip_node->chip_name, HIFC_CHIP_NAME "%d", &id); + if (err < 0) + pr_err("Failed to get hifc id\n"); + + card_id->id[i] = id; + i++; + } + lld_dev_put(); + card_id->num = i; +} + +static struct card_node *hifc_get_chip_node_by_hwdev(const void *hwdev) +{ + struct card_node *chip_node = NULL; + struct card_node *node_tmp = NULL; + struct hifc_pcidev *dev; + + if (!hwdev) + return NULL; + + lld_dev_hold(); + list_for_each_entry(node_tmp, &g_hinic_chip_list, node) { + if (!chip_node) { + list_for_each_entry(dev, &node_tmp->func_list, node) { + if (test_bit(HIFC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + if (dev->hwdev == hwdev) { + chip_node = node_tmp; + break; + } + } + } + } + + lld_dev_put(); + + return chip_node; +} + +int hifc_get_device_id(void *hwdev, u16 *dev_id) +{ + struct card_node *chip_node = NULL; + struct hifc_pcidev *dev; + u16 vendor_id = 0; + u16 device_id = 0; + + if ((!dev_id) || (!hwdev)) { + pr_err("Input param invalid!\n"); + return -ENODEV; + } + chip_node = hifc_get_chip_node_by_hwdev(hwdev); + if (!chip_node) + return -ENODEV; + + lld_dev_hold(); + list_for_each_entry(dev, &chip_node->func_list, node) { + if (test_bit(HIFC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + pci_read_config_word(dev->pcidev, 0, &vendor_id); + if (vendor_id == HIFC_PCI_VENDOR_ID) { + pci_read_config_word(dev->pcidev, 2, &device_id); + break; + } + } + lld_dev_put(); + *dev_id = device_id; + + return 0; +} + +int hifc_get_pf_id(void *hwdev, u32 port_id, u32 *pf_id, u32 *isvalid) +{ + struct card_node *chip_node = NULL; + struct hifc_pcidev *dev; + + if ((!isvalid) || (!pf_id) || (!hwdev)) { + pr_err("Input param invalid!\n"); + return -ENODEV; + } + chip_node = hifc_get_chip_node_by_hwdev(hwdev); + if (!chip_node) + return -ENODEV; + + lld_dev_hold(); + list_for_each_entry(dev, &chip_node->func_list, node) { + if (hifc_physical_port_id(dev->hwdev) == port_id) { + *pf_id = hifc_global_func_id(dev->hwdev); + *isvalid = 1; + break; + } + } + lld_dev_put(); + + return 0; +} + +bool hifc_is_valid_bar_addr(u64 offset) +{ + struct card_node *chip_node = NULL; + struct hifc_pcidev *dev; + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + list_for_each_entry(dev, &chip_node->func_list, node) { + if (test_bit(HIFC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + if (offset == pci_resource_start(dev->pcidev, 0)) { + lld_dev_put(); + return true; + } + } + } + lld_dev_put(); + + return false; +} + +void hifc_get_card_func_info_by_card_name( + const char *chip_name, struct hifc_card_func_info *card_func) +{ + struct card_node *chip_node = NULL; + struct hifc_pcidev *dev; + struct func_pdev_info *pdev_info; + + if ((!card_func) || (!chip_name)) { + pr_err("Input param invalid!\n"); + return; + } + card_func->num_pf = 0; + + lld_dev_hold(); + list_for_each_entry(chip_node, &g_hinic_chip_list, node) { + if (strncmp(chip_node->chip_name, chip_name, IFNAMSIZ)) + continue; + + list_for_each_entry(dev, &chip_node->func_list, node) { + if (hifc_func_type(dev->hwdev) == TYPE_VF) + continue; + + if (test_bit(HIFC_FUNC_IN_REMOVE, &dev->flag)) + continue; + + pdev_info = &card_func->pdev_info[card_func->num_pf]; + pdev_info->bar0_size = 
pci_resource_len(dev->pcidev, 0); + pdev_info->bar0_phy_addr = + pci_resource_start(dev->pcidev, 0); + + card_func->num_pf++; + if (card_func->num_pf >= MAX_SIZE) + break; + } + } + + lld_dev_put(); +} + +static bool __is_func_valid(struct hifc_pcidev *dev) +{ + if (test_bit(HIFC_FUNC_IN_REMOVE, &dev->flag)) + return false; + + if (dev->init_state < HIFC_INIT_STATE_HWDEV_INITED) + return false; + + return true; +} + +void hifc_get_card_info(void *hwdev, void *bufin) +{ + struct card_node *chip_node = NULL; + struct card_info *info = (struct card_info *)bufin; + struct hifc_pcidev *dev; + u32 idx = 0; + + if ((!bufin) || (!hwdev)) { + pr_err("Input param invalid!\n"); + return; + } + info->pf_num = 0; + + chip_node = hifc_get_chip_node_by_hwdev(hwdev); + if (!chip_node) + return; + + lld_dev_hold(); + list_for_each_entry(dev, &chip_node->func_list, node) { + if (!__is_func_valid(dev)) + continue; + + strlcpy(info->pf[idx].name, dev->uld_dev_name, IFNAMSIZ); + info->pf[idx].pf_type = (u32)BIT(SERVICE_T_FC); + strlcpy(info->pf[idx].bus_info, pci_name(dev->pcidev), + sizeof(info->pf[idx].bus_info)); + info->pf_num++; + idx++; + } + lld_dev_put(); +} diff --git a/drivers/scsi/huawei/hifc/unf_portman.c b/drivers/scsi/huawei/hifc/unf_portman.c new file mode 100644 index 000000000000..d244a5c1aaa2 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_portman.c @@ -0,0 +1,5565 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#include "unf_log.h" +#include "unf_common.h" +#include "unf_event.h" +#include "unf_lport.h" +#include "unf_exchg.h" +#include "unf_portman.h" +#include "unf_rport.h" +#include "unf_io.h" +#include "unf_service.h" +#include "unf_rport.h" +#include "unf_npiv.h" +#include "hifc_portmng.h" + +#define UNF_LOOP_STOP_NEED_WAIT 0 +#define UNF_LOOP_STOP_NO_NEED_WAIT 1 + +#define UNF_MAX_SAVE_ENTRY_NUM 60 +#define UNF_CHECK_CONFIG_SPEED_BY_SFSSPEED(sfs_speed, cfg_speed) \ + ((sfs_speed) < (cfg_speed) || (sfs_speed) == UNF_PORT_SFP_SPEED_ERR) +#define UNF_LPORT_CHIP_ERROR(lport) \ + ((lport)->pcie_error_cnt.pcie_error_count[UNF_PCIE_FATALERRORDETECTED]) + +struct unf_global_lport_s global_lport_mgr; + +static unsigned int unf_port_link_up(struct unf_lport_s *v_lport, + void *v_in_put); +static unsigned int unf_port_link_down(struct unf_lport_s *v_lport, + void *v_in_put); +static unsigned int unf_port_abnormal_reset(struct unf_lport_s *v_lport, + void *v_in_put); +static unsigned int unf_port_reset_start(struct unf_lport_s *v_lport, + void *v_in_put); +static unsigned int unf_port_reset_end(struct unf_lport_s *v_lport, + void *v_in_put); +static unsigned int unf_port_nop(struct unf_lport_s *v_lport, void *v_in_put); +static unsigned int unf_port_clean_done(struct unf_lport_s *v_lport, + void *v_in_put); +static unsigned int unf_port_begin_remove(struct unf_lport_s *v_lport, + void *v_in_put); +static unsigned int unf_port_release_rport_index(struct unf_lport_s *v_lport, + void *v_in_put); +static int unf_cm_port_info_get(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_in_put); +static int unf_cm_port_speed_set(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_in_put); +static int unf_cm_topo_set(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_in_put); +static int unf_cm_port_set(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_in_put); +static int unf_get_port_sfp_info(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_in_put); +static int 
unf_cm_get_all_port_info(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_in_put); +static int unf_cm_clear_error_code_sum(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_in_put); +static int unf_cm_bbscn_set(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_in_put); +static int unf_get_io_dfx_statistics(struct unf_lport_s *v_pstLPort, + struct unf_hinicam_pkg *v_input); +static int unf_cm_set_vport(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input); +static int unf_cm_link_delay_get(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_in_put); +static int unf_cm_save_data_mode(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_in_put); +static int unf_cm_set_dif(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_in_put); +static int unf_cm_select_dif_mode(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_in_put); +static int unf_cm_adm_show_xchg(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_in_put); +static int unf_cm_adm_link_time_out_opt(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_in_put); +static int unf_cm_adm_log_level_opt(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_in_put); + +static struct unf_port_action_s lport_action[] = { + { UNF_PORT_LINK_UP, unf_port_link_up }, + { UNF_PORT_LINK_DOWN, unf_port_link_down }, + { UNF_PORT_RESET_START, unf_port_reset_start }, + { UNF_PORT_RESET_END, unf_port_reset_end }, + { UNF_PORT_NOP, unf_port_nop }, + { UNF_PORT_CLEAN_DONE, unf_port_clean_done }, + { UNF_PORT_BEGIN_REMOVE, unf_port_begin_remove }, + { UNF_PORT_RELEASE_RPORT_INDEX, unf_port_release_rport_index }, + { UNF_PORT_ABNORMAL_RESET, unf_port_abnormal_reset }, +}; + +static struct unf_hifcadm_action_s unf_hifcadm_action[] = { + { UNF_PORT_SET_OP, unf_cm_port_set }, + { UNF_TOPO_SET_OP, unf_cm_topo_set }, + { UNF_SPEED_SET_OP, unf_cm_port_speed_set }, + { UNF_INFO_GET_OP, unf_cm_port_info_get }, + { UNF_INFO_CLEAR_OP, unf_cm_clear_error_code_sum }, + { UNF_SFP_INFO_OP, unf_get_port_sfp_info }, + { UNF_ALL_INFO_OP, unf_cm_get_all_port_info }, + { UNF_BBSCN, unf_cm_bbscn_set }, + { UNF_DFX, unf_get_io_dfx_statistics }, + { UNF_VPORT, unf_cm_set_vport }, + { UNF_LINK_DELAY, unf_cm_link_delay_get }, + { UNF_SAVA_DATA, unf_cm_save_data_mode }, + { UNF_DIF, unf_cm_set_dif }, + { UNF_DIF_CONFIG, unf_cm_select_dif_mode }, + { UNF_SHOW_XCHG, unf_cm_adm_show_xchg }, + { FC_LINK_TMO_OPT, unf_cm_adm_link_time_out_opt }, + { FC_DRV_LOG_OPT, unf_cm_adm_log_level_opt }, +}; + +static void unf_destroy_dirty_rport(struct unf_lport_s *v_lport, + int v_show_only) +{ + unsigned int dirty_rport = 0; + + UNF_REFERNCE_VAR(dirty_rport); + + /* for whole L_Port */ + if (v_lport->dirty_flag & UNF_LPORT_DIRTY_FLAG_RPORT_POOL_DIRTY) { + dirty_rport = v_lport->rport_pool.rport_pool_count; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) has %u dirty RPort(s)", + v_lport->port_id, dirty_rport); + + /* free R_Port pool memory & bitmap */ + if (v_show_only == UNF_FALSE) { + vfree(v_lport->rport_pool.rport_pool_add); + v_lport->rport_pool.rport_pool_add = NULL; + vfree(v_lport->rport_pool.pul_rpi_bitmap); + v_lport->rport_pool.pul_rpi_bitmap = NULL; + } + } + + UNF_REFERNCE_VAR(dirty_rport); +} + +void unf_show_dirty_port(int v_show_only, unsigned int *v_ditry_port_num) +{ + struct list_head *node = NULL; + struct list_head *node_next = NULL; + struct unf_lport_s *lport = NULL; + unsigned long flags = 0; + unsigned int port_num = 0; + + UNF_CHECK_VALID(0x2200, UNF_TRUE, NULL != 
v_ditry_port_num, return); + + /* for each dirty L_Port from global L_Port list */ + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); + list_for_each_safe(node, node_next, &global_lport_mgr.list_dirty_head) { + lport = list_entry(node, struct unf_lport_s, entry_lport); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) has dirty data(0x%x)", + lport->port_id, lport->dirty_flag); + + /* Destroy dirty L_Port's exchange(s) & R_Port(s) */ + unf_destroy_dirty_xchg(lport, v_show_only); + unf_destroy_dirty_rport(lport, v_show_only); + + /* Delete (dirty L_Port) list entry if necessary */ + if (v_show_only == UNF_FALSE) { + list_del_init(node); + vfree(lport); + } + + port_num++; + } + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, + flags); + + *v_ditry_port_num = port_num; +} + +int unf_send_event(unsigned int port_id, + unsigned int syn_flag, + void *argc_in, + void *argc_out, + int (*p_func)(void *argc_in, void *argc_out)) +{ + struct unf_lport_s *lport = NULL; + struct unf_cm_event_report *event = NULL; + int ret = 0; + + lport = unf_find_lport_by_port_id(port_id); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_INFO, + "Cannot find LPort(0x%x).", port_id); + + return UNF_RETURN_ERROR; + } + + if (unf_lport_refinc(lport) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "LPort(0x%x) is removing, no need process.", + lport->port_id); + + return UNF_RETURN_ERROR; + } + if (unlikely((!lport->event_mgr.pfn_unf_get_free_event) || + (!lport->event_mgr.pfn_unf_post_event) || + (!lport->event_mgr.pfn_unf_release_event))) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Event function is NULL."); + + unf_lport_ref_dec_to_destroy(lport); + + return UNF_RETURN_ERROR; + } + + if (lport->b_port_removing == UNF_TRUE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "LPort(0x%x) is removing, no need process.", + lport->port_id); + + unf_lport_ref_dec_to_destroy(lport); + + return UNF_RETURN_ERROR; + } + + event = lport->event_mgr.pfn_unf_get_free_event((void *)lport); + if (!event) { + unf_lport_ref_dec_to_destroy(lport); + + return UNF_RETURN_ERROR; + } + + init_completion(&event->event_comp); + event->lport = lport; + event->event_asy_flag = syn_flag; + event->pfn_unf_event_task = p_func; + event->para_in = argc_in; + event->para_out = argc_out; + lport->event_mgr.pfn_unf_post_event(lport, event); + + if (event->event_asy_flag) { + /* You must wait for the other party to return. Otherwise, + *the linked list may be in disorder. 
+ */ + wait_for_completion(&event->event_comp); + ret = (int)event->result; + lport->event_mgr.pfn_unf_release_event(lport, event); + } else { + ret = RETURN_OK; + } + + unf_lport_ref_dec_to_destroy(lport); + return ret; +} + +void unf_lport_update_topo(struct unf_lport_s *v_lport, + enum unf_act_topo_e v_enactive_topo) +{ + unsigned long flag = 0; + + UNF_CHECK_VALID(0x2210, UNF_TRUE, NULL != v_lport, return); + + if ((v_enactive_topo > UNF_ACT_TOP_UNKNOWN) || + (v_enactive_topo < UNF_ACT_TOP_PUBLIC_LOOP)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) set invalid topology(0x%x) with current value(0x%x)", + v_lport->nport_id, v_enactive_topo, + v_lport->en_act_topo); + + return; + } + + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + v_lport->en_act_topo = v_enactive_topo; + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); +} + +void unf_set_lport_removing(struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x2216, UNF_TRUE, (v_lport), return); + + v_lport->fc_port = NULL; + v_lport->b_port_removing = UNF_TRUE; + v_lport->destroy_step = UNF_LPORT_DESTROY_STEP_0_SET_REMOVING; +} + +unsigned int unf_release_local_port(void *v_lport) +{ + struct unf_lport_s *lport = v_lport; + struct completion local_port_free_completion = + COMPLETION_INITIALIZER(local_port_free_completion); + + UNF_CHECK_VALID(0x2217, UNF_TRUE, (lport), + return UNF_RETURN_ERROR); + + lport->lport_free_completion = &local_port_free_completion; + unf_set_lport_removing(lport); + unf_lport_ref_dec(lport); + wait_for_completion(lport->lport_free_completion); + /* for dirty case */ + if (lport->dirty_flag == 0) + vfree(lport); + + return RETURN_OK; +} + +static void unf_free_all_esgl_pages(struct unf_lport_s *v_lport) +{ + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flag = 0; + unsigned int alloc_idx; + + UNF_CHECK_VALID(0x2218, UNF_TRUE, (v_lport), return); + spin_lock_irqsave(&v_lport->esgl_pool.esgl_pool_lock, flag); + list_for_each_safe(node, next_node, + &v_lport->esgl_pool.list_esgl_pool) { + list_del(node); + } + + spin_unlock_irqrestore(&v_lport->esgl_pool.esgl_pool_lock, flag); + + if (v_lport->esgl_pool.esgl_buf_list.buflist) { + for (alloc_idx = 0; + alloc_idx < v_lport->esgl_pool.esgl_buf_list.buf_num; + alloc_idx++) { + if (v_lport->esgl_pool.esgl_buf_list.buflist[alloc_idx].vaddr) { + dma_free_coherent(&v_lport->low_level_func.dev->dev, + v_lport->esgl_pool.esgl_buf_list.buf_size, + v_lport->esgl_pool.esgl_buf_list.buflist[alloc_idx].vaddr, + v_lport->esgl_pool.esgl_buf_list.buflist[alloc_idx].paddr); + v_lport->esgl_pool.esgl_buf_list.buflist[alloc_idx].vaddr = NULL; + } + } + kfree(v_lport->esgl_pool.esgl_buf_list.buflist); + v_lport->esgl_pool.esgl_buf_list.buflist = NULL; + } +} + +static unsigned int unf_init_esgl_pool(struct unf_lport_s *v_lport) +{ + struct unf_esgl_s *esgl = NULL; + unsigned int ret = RETURN_OK; + unsigned int index = 0; + unsigned int buf_total_size; + unsigned int buf_num; + unsigned int alloc_idx; + unsigned int cur_buf_idx = 0; + unsigned int cur_buf_offset = 0; + unsigned int buf_cnt_perhugebuf; + + UNF_CHECK_VALID(0x2219, UNF_TRUE, NULL != v_lport, + return UNF_RETURN_ERROR); + + v_lport->esgl_pool.esgl_pool_count = + v_lport->low_level_func.lport_cfg_items.max_io; + spin_lock_init(&v_lport->esgl_pool.esgl_pool_lock); + INIT_LIST_HEAD(&v_lport->esgl_pool.list_esgl_pool); + + v_lport->esgl_pool.esgl_pool_addr = + vmalloc((size_t)((v_lport->esgl_pool.esgl_pool_count) * + sizeof(struct 
unf_esgl_s))); + if (!v_lport->esgl_pool.esgl_pool_addr) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_ERR, + "LPort(0x%x) cannot allocate ESGL Pool.", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + esgl = (struct unf_esgl_s *)v_lport->esgl_pool.esgl_pool_addr; + memset(esgl, 0, ((v_lport->esgl_pool.esgl_pool_count) * + sizeof(struct unf_esgl_s))); + + buf_total_size = + (unsigned int)(PAGE_SIZE * v_lport->esgl_pool.esgl_pool_count); + + v_lport->esgl_pool.esgl_buf_list.buf_size = + buf_total_size > BUF_LIST_PAGE_SIZE ? BUF_LIST_PAGE_SIZE : + buf_total_size; + buf_cnt_perhugebuf = + v_lport->esgl_pool.esgl_buf_list.buf_size / PAGE_SIZE; + buf_num = v_lport->esgl_pool.esgl_pool_count % + buf_cnt_perhugebuf ? v_lport->esgl_pool.esgl_pool_count / + buf_cnt_perhugebuf + 1 : v_lport->esgl_pool.esgl_pool_count / + buf_cnt_perhugebuf; + v_lport->esgl_pool.esgl_buf_list.buflist = + (struct buff_list_s *) + kmalloc(buf_num * sizeof(struct buff_list_s), GFP_KERNEL); + v_lport->esgl_pool.esgl_buf_list.buf_num = buf_num; + + if (!v_lport->esgl_pool.esgl_buf_list.buflist) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Allocate Esgl pool buf list failed out of memory"); + goto free_buff; + } + memset(v_lport->esgl_pool.esgl_buf_list.buflist, 0, + buf_num * sizeof(struct buff_list_s)); + + for (alloc_idx = 0; alloc_idx < buf_num; alloc_idx++) { + v_lport->esgl_pool.esgl_buf_list.buflist[alloc_idx].vaddr = + dma_alloc_coherent( + &v_lport->low_level_func.dev->dev, + v_lport->esgl_pool.esgl_buf_list.buf_size, + &v_lport->esgl_pool.esgl_buf_list.buflist[alloc_idx].paddr, + GFP_KERNEL); + if (!v_lport->esgl_pool.esgl_buf_list.buflist[alloc_idx].vaddr) + goto free_buff; + + memset(v_lport->esgl_pool.esgl_buf_list.buflist[alloc_idx].vaddr, + 0, v_lport->esgl_pool.esgl_buf_list.buf_size); + } + + /* allocates the Esgl page, and the DMA uses the */ + for (index = 0; index < v_lport->esgl_pool.esgl_pool_count; index++) { + if ((index != 0) && !(index % buf_cnt_perhugebuf)) + cur_buf_idx++; + + cur_buf_offset = + (unsigned int) + (PAGE_SIZE * (index % buf_cnt_perhugebuf)); + esgl->page.page_address = + (unsigned long long)v_lport->esgl_pool.esgl_buf_list.buflist[cur_buf_idx].vaddr + + cur_buf_offset; + esgl->page.page_size = PAGE_SIZE; + esgl->page.esgl_phyaddr = + v_lport->esgl_pool.esgl_buf_list.buflist[cur_buf_idx].paddr + + cur_buf_offset; + list_add_tail(&esgl->entry_esgl, + &v_lport->esgl_pool.list_esgl_pool); + esgl++; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[EVENT]Allocate bufnum:%u, buf_total_size:%u", buf_num, + buf_total_size); + return ret; +free_buff: + unf_free_all_esgl_pages(v_lport); + vfree(v_lport->esgl_pool.esgl_pool_addr); + + return UNF_RETURN_ERROR; +} + +static void unf_free_esgl_pool(struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x2220, UNF_TRUE, (v_lport), return); + + unf_free_all_esgl_pages(v_lport); + v_lport->esgl_pool.esgl_pool_count = 0; + + if (v_lport->esgl_pool.esgl_pool_addr) { + vfree(v_lport->esgl_pool.esgl_pool_addr); + v_lport->esgl_pool.esgl_pool_addr = NULL; + } + + v_lport->destroy_step = UNF_LPORT_DESTROY_STEP_5_DESTROY_ESGL_POOL; +} + +struct unf_lport_s *unf_find_lport_by_port_id(unsigned int v_port_id) +{ + struct unf_lport_s *lport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flags = 0; + unsigned int port_id = v_port_id & (~PORTID_VPINDEX_MASK); + unsigned short vport_index = (v_port_id & PORTID_VPINDEX_MASK) >> + PORTID_VPINDEX_SHIT; 
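+	/*
+	 * The caller-supplied port id may carry an NPIV vport index in its
+	 * upper bits (PORTID_VPINDEX_MASK): strip it off, look the physical
+	 * L_Port up in both the active list and the in-integration list,
+	 * and then resolve the requested vport by index on the match.
+	 */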
+ + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); + + list_for_each_safe(node, next_node, + &global_lport_mgr.list_lport_list_head) { + lport = list_entry(node, struct unf_lport_s, entry_lport); + if ((port_id == lport->port_id) && + (lport->b_port_removing != UNF_TRUE)) { + spin_unlock_irqrestore( + &global_lport_mgr.global_lport_list_lock, + flags); + return unf_cm_lookup_vport_by_vp_index(lport, + vport_index); + } + } + + list_for_each_safe(node, next_node, + &global_lport_mgr.list_intergrad_head) { + lport = list_entry(node, struct unf_lport_s, entry_lport); + if ((port_id == lport->port_id) && + (lport->b_port_removing != UNF_TRUE)) { + spin_unlock_irqrestore( + &global_lport_mgr.global_lport_list_lock, + flags); + return unf_cm_lookup_vport_by_vp_index(lport, + vport_index); + } + } + + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags); + + return NULL; +} + +unsigned int unf_is_vport_valid(struct unf_lport_s *v_lport, + struct unf_lport_s *v_vport) +{ + struct unf_lport_s *lport = NULL; + struct unf_vport_pool_s *vport_pool = NULL; + struct unf_lport_s *vport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x1977, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x1977, UNF_TRUE, v_vport, return UNF_RETURN_ERROR); + + lport = v_lport; + vport_pool = lport->vport_pool; + if (unlikely(!vport_pool)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) vport pool is NULL", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); + list_for_each_safe(node, next_node, &lport->list_vports_head) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + + if (vport == v_vport && vport->b_port_removing != UNF_TRUE) { + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, + flag); + + return RETURN_OK; + } + } + + list_for_each_safe(node, next_node, &lport->list_intergrad_vports) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + + if (vport == v_vport && vport->b_port_removing != UNF_TRUE) { + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, + flag); + + return RETURN_OK; + } + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); + + return UNF_RETURN_ERROR; +} + +unsigned int unf_is_lport_valid(struct unf_lport_s *v_lport) +{ + struct unf_lport_s *lport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flags = 0; + + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); + + list_for_each_safe(node, next_node, + &global_lport_mgr.list_lport_list_head) { + lport = list_entry(node, struct unf_lport_s, entry_lport); + + if ((v_lport == lport) && + (lport->b_port_removing != UNF_TRUE)) { + spin_unlock_irqrestore( + &global_lport_mgr.global_lport_list_lock, + flags); + return RETURN_OK; + } + + if (unf_is_vport_valid(lport, v_lport) == RETURN_OK) { + spin_unlock_irqrestore( + &global_lport_mgr.global_lport_list_lock, + flags); + + return RETURN_OK; + } + } + + list_for_each_safe(node, next_node, + &global_lport_mgr.list_intergrad_head) { + lport = list_entry(node, struct unf_lport_s, entry_lport); + + if ((v_lport == lport) && + (lport->b_port_removing != UNF_TRUE)) { + spin_unlock_irqrestore( + &global_lport_mgr.global_lport_list_lock, + flags); + return RETURN_OK; + } + + if (unf_is_vport_valid(lport, v_lport) == RETURN_OK) { + spin_unlock_irqrestore( + 
&global_lport_mgr.global_lport_list_lock, + flags); + + return RETURN_OK; + } + } + + list_for_each_safe(node, next_node, + &global_lport_mgr.list_destroy_head) { + lport = list_entry(node, struct unf_lport_s, entry_lport); + + if ((v_lport == lport) && + (lport->b_port_removing != UNF_TRUE)) { + spin_unlock_irqrestore( + &global_lport_mgr.global_lport_list_lock, + flags); + return RETURN_OK; + } + + if (unf_is_vport_valid(lport, v_lport) == RETURN_OK) { + spin_unlock_irqrestore( + &global_lport_mgr.global_lport_list_lock, + flags); + + return RETURN_OK; + } + } + + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, + flags); + return UNF_RETURN_ERROR; +} + +static void unf_clean_link_down_io(struct unf_lport_s *v_lport, + int v_clean_flag) +{ + /* Clean L_Port/V_Port Link Down I/O: Set Abort Tag */ + UNF_CHECK_VALID(0x2225, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x2685, UNF_TRUE, + v_lport->xchg_mgr_temp.pfn_unf_xchg_abort_all_io, + return); + + v_lport->xchg_mgr_temp.pfn_unf_xchg_abort_all_io(v_lport, + UNF_XCHG_TYPE_INI, v_clean_flag); + v_lport->xchg_mgr_temp.pfn_unf_xchg_abort_all_io(v_lport, + UNF_XCHG_TYPE_SFS, v_clean_flag); +} + +unsigned int unf_fc_port_link_event(void *v_lport, unsigned int v_events, + void *v_input) +{ + struct unf_lport_s *lport = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int index = 0; + + if (unlikely(!v_lport)) + return UNF_RETURN_ERROR; + + lport = (struct unf_lport_s *)v_lport; + + ret = unf_lport_refinc(lport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) is removing and do nothing", + lport->port_id); + return RETURN_OK; + } + + /* process port event */ + while (index < (sizeof(lport_action) / + sizeof(struct unf_port_action_s))) { + if (v_events == lport_action[index].action) { + ret = lport_action[index].fn_unf_action(lport, v_input); + + unf_lport_ref_dec_to_destroy(lport); + + return ret; + } + index++; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) receive unknown event(0x%x)", + lport->port_id, v_events); + + unf_lport_ref_dec_to_destroy(lport); + + return ret; +} + +void unf_port_mgmt_init(void) +{ + memset(&global_lport_mgr, 0, sizeof(struct unf_global_lport_s)); + + INIT_LIST_HEAD(&global_lport_mgr.list_lport_list_head); + + INIT_LIST_HEAD(&global_lport_mgr.list_intergrad_head); + + INIT_LIST_HEAD(&global_lport_mgr.list_destroy_head); + + INIT_LIST_HEAD(&global_lport_mgr.list_dirty_head); + + spin_lock_init(&global_lport_mgr.global_lport_list_lock); + + UNF_SET_NOMAL_MODE(global_lport_mgr.dft_mode); + + global_lport_mgr.b_start_work = UNF_TRUE; +} + +void unf_port_mgmt_deinit(void) +{ + if (global_lport_mgr.lport_sum != 0) + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]There are %u port pool memory giveaway", + global_lport_mgr.lport_sum); + + memset(&global_lport_mgr, 0, sizeof(struct unf_global_lport_s)); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Common port manager exit succeed"); +} + +static void unf_port_register(struct unf_lport_s *v_lport) +{ + unsigned long flags = 0; + + UNF_CHECK_VALID(0x2230, UNF_TRUE, (v_lport), return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "Register LPort(0x%p), port ID(0x%x).", + v_lport, v_lport->port_id); + + /* Add to the global management linked list header */ + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); + list_add_tail(&v_lport->entry_lport, + 
&global_lport_mgr.list_lport_list_head); + global_lport_mgr.lport_sum++; + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags); +} + +static void unf_port_unregister(struct unf_lport_s *v_lport) +{ + unsigned long flags = 0; + + UNF_CHECK_VALID(0x2703, UNF_TRUE, (v_lport), return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "Unregister LPort(0x%p), port ID(0x%x).", + v_lport, v_lport->port_id); + + /* Remove from the global management linked list header */ + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); + list_del(&v_lport->entry_lport); + global_lport_mgr.lport_sum--; + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags); +} + +static int unf_port_switch(struct unf_lport_s *v_lport, + unsigned int v_switch_flag) +{ + struct unf_lport_s *lport = v_lport; + int ret = UNF_RETURN_ERROR; + int switch_flag = UNF_FALSE; + + UNF_CHECK_VALID(0x2261, UNF_TRUE, lport, return UNF_RETURN_ERROR); + + if (!lport->low_level_func.port_mgr_op.pfn_ll_port_config_set) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_WARN, + "[warn]Port(0x%x)'s config(switch) function is NULL", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + switch_flag = v_switch_flag ? UNF_TRUE : UNF_FALSE; + ret = (int)lport->low_level_func.port_mgr_op.pfn_ll_port_config_set( + lport->fc_port, + UNF_PORT_CFG_SET_PORT_SWITCH, (void *)&switch_flag); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_WARN, + "[warn]Port(0x%x) switch %s failed", + lport->port_id, + v_switch_flag ? "On" : "Off"); + + return UNF_RETURN_ERROR; + } + + lport->b_switch_state = (enum int_e)switch_flag; + + return RETURN_OK; +} + +int unf_port_start_work(struct unf_lport_s *v_lport) +{ + unsigned long flag = 0; + struct unf_fw_version_s fw_version = { 0 }; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x2231, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + if (v_lport->en_start_work_state != UNF_START_WORK_STOP) { + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + return RETURN_OK; + } + v_lport->en_start_work_state = UNF_START_WORK_COMPLETE; + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + if (!v_lport->low_level_func.port_mgr_op.pfn_ll_port_diagnose) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Port(0x%x)'s corresponding function is NULL.", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + fw_version.message_type = UNF_DEBUG_TYPE_MESSAGE; + + ret = v_lport->low_level_func.port_mgr_op.pfn_ll_port_diagnose( + (void *)v_lport->fc_port, + UNF_PORT_DIAG_PORT_DETAIL, &fw_version); + if (ret != RETURN_OK) + v_lport->fw_version[0] = '\0'; + else + memcpy(v_lport->fw_version, fw_version.fw_version, + HIFC_VER_LEN); + + unf_cm_get_save_info(v_lport); + /* switch sfp to start work */ + (void)unf_port_switch(v_lport, UNF_TRUE); + + return RETURN_OK; +} + +static unsigned int unf_lport_init_lw_fun_op( + struct unf_lport_s *v_lport, + struct unf_low_level_function_op_s *low_level_op) +{ + UNF_CHECK_VALID(0x2235, UNF_TRUE, (v_lport), return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x2236, UNF_TRUE, (low_level_op), + return UNF_RETURN_ERROR); + + v_lport->port_id = low_level_op->lport_cfg_items.port_id; + v_lport->port_name = low_level_op->sys_port_name; + v_lport->node_name = low_level_op->sys_node_name; + v_lport->options = low_level_op->lport_cfg_items.port_mode; + v_lport->en_act_topo = UNF_ACT_TOP_UNKNOWN; + + 
memcpy(&v_lport->low_level_func, low_level_op, + sizeof(struct unf_low_level_function_op_s)); + + return RETURN_OK; +} + +void unf_lport_release_lw_fun_op(struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x2237, UNF_TRUE, v_lport, return); + + memset(&v_lport->low_level_func, 0, + sizeof(struct unf_low_level_function_op_s)); + + v_lport->destroy_step = UNF_LPORT_DESTROY_STEP_13_DESTROY_LW_INTERFACE; +} + +struct unf_lport_s *unf_find_lport_by_scsi_host_id(unsigned int scsi_host_id) +{ + struct list_head *node = NULL, *next_node = NULL; + struct list_head *vp_node = NULL, *next_vp_node = NULL; + struct unf_lport_s *lport = NULL; + struct unf_lport_s *vport = NULL; + unsigned long flags = 0; + unsigned long vpool_flags = 0; + + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); + list_for_each_safe(node, next_node, + &global_lport_mgr.list_lport_list_head) { + lport = list_entry(node, struct unf_lport_s, entry_lport); + + if (scsi_host_id == + UNF_GET_SCSI_HOST_ID((lport->host_info.p_scsi_host))) { + spin_unlock_irqrestore( + &global_lport_mgr.global_lport_list_lock, + flags); + + return lport; + } + + /* support NPIV */ + if (lport->vport_pool) { + spin_lock_irqsave(&lport->vport_pool->vport_pool_lock, + vpool_flags); + list_for_each_safe(vp_node, next_vp_node, + &lport->list_vports_head) { + vport = list_entry(vp_node, struct unf_lport_s, + entry_vport); + + if (scsi_host_id == + UNF_GET_SCSI_HOST_ID(vport->host_info.p_scsi_host)) { + spin_unlock_irqrestore( + &lport->vport_pool->vport_pool_lock, + vpool_flags); + spin_unlock_irqrestore( + &global_lport_mgr.global_lport_list_lock, + flags); + + return vport; + } + } + spin_unlock_irqrestore( + &lport->vport_pool->vport_pool_lock, vpool_flags); + } + } + + list_for_each_safe(node, next_node, + &global_lport_mgr.list_intergrad_head) { + lport = list_entry(node, struct unf_lport_s, entry_lport); + + if (scsi_host_id == + UNF_GET_SCSI_HOST_ID(lport->host_info.p_scsi_host)) { + spin_unlock_irqrestore( + &global_lport_mgr.global_lport_list_lock, + flags); + + return lport; + } + + /* support NPIV */ + if (lport->vport_pool) { + spin_lock_irqsave(&lport->vport_pool->vport_pool_lock, + vpool_flags); + list_for_each_safe(vp_node, next_vp_node, + &lport->list_vports_head) { + vport = list_entry(vp_node, struct unf_lport_s, + entry_vport); + + if (scsi_host_id == + UNF_GET_SCSI_HOST_ID(vport->host_info.p_scsi_host)) { + spin_unlock_irqrestore( + &lport->vport_pool->vport_pool_lock, + vpool_flags); + spin_unlock_irqrestore( + &global_lport_mgr.global_lport_list_lock, + flags); + + return vport; + } + } + spin_unlock_irqrestore( + &lport->vport_pool->vport_pool_lock, vpool_flags); + } + } + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Can not find port by scsi_host_id(0x%x), may be removing", + scsi_host_id); + + return NULL; +} + +unsigned int unf_init_scsi_id_table(struct unf_lport_s *v_lport) +{ + struct unf_rport_scsi_id_image_s *rport_scsi_id_image = NULL; + struct unf_wwpn_rport_info_s *wwpn_port_info = NULL; + unsigned int idx; + + UNF_CHECK_VALID(0x2238, UNF_TRUE, (v_lport), + return UNF_RETURN_ERROR); + + rport_scsi_id_image = &v_lport->rport_scsi_table; + rport_scsi_id_image->max_scsi_id = UNF_MAX_SCSI_ID; + + /* If the number of remote connections supported by the L_Port is 0, + * an exception occurs + */ + if (rport_scsi_id_image->max_scsi_id == 0) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + 
"[err]Port(0x%x), supported maximum login is zero.", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + rport_scsi_id_image->wwn_rport_info_table = + vmalloc(rport_scsi_id_image->max_scsi_id * + sizeof(struct unf_wwpn_rport_info_s)); + if (!rport_scsi_id_image->wwn_rport_info_table) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) can't allocate SCSI ID Table(0x%x).", + v_lport->port_id, rport_scsi_id_image->max_scsi_id); + + return UNF_RETURN_ERROR; + } + memset(rport_scsi_id_image->wwn_rport_info_table, 0, + rport_scsi_id_image->max_scsi_id * + sizeof(struct unf_wwpn_rport_info_s)); + + wwpn_port_info = rport_scsi_id_image->wwn_rport_info_table; + + for (idx = 0; idx < rport_scsi_id_image->max_scsi_id; idx++) { + INIT_DELAYED_WORK(&wwpn_port_info->loss_tmo_work, + unf_sesion_loss_timeout); + INIT_LIST_HEAD(&wwpn_port_info->fc_lun_list); + wwpn_port_info->lport = v_lport; + wwpn_port_info->target_id = INVALID_VALUE32; + wwpn_port_info++; + } + + spin_lock_init(&rport_scsi_id_image->scsi_image_table_lock); + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Port(0x%x) supported maximum login is %d.", + v_lport->port_id, rport_scsi_id_image->max_scsi_id); + + return RETURN_OK; +} + +void unf_destroy_scsi_id_table(struct unf_lport_s *v_lport) +{ + struct unf_rport_scsi_id_image_s *rport_scsi_id_image = NULL; + struct unf_wwpn_rport_info_s *wwpn_rport_info = NULL; + unsigned int i = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x2239, UNF_TRUE, (v_lport), return); + + rport_scsi_id_image = &v_lport->rport_scsi_table; + if (rport_scsi_id_image->wwn_rport_info_table) { + for (i = 0; i < UNF_MAX_SCSI_ID; i++) { + wwpn_rport_info = + &rport_scsi_id_image->wwn_rport_info_table[i]; + UNF_DELAYED_WORK_SYNC(ret, v_lport->port_id, + &wwpn_rport_info->loss_tmo_work, + "loss tmo Timer work"); + if (wwpn_rport_info->dfx_counter) + vfree(wwpn_rport_info->dfx_counter); + } + + /* just for pc_lint */ + if (ret) + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_INFO, + "Port(0x%x) cancel loss tmo work success", + v_lport->port_id); + + vfree(rport_scsi_id_image->wwn_rport_info_table); + rport_scsi_id_image->wwn_rport_info_table = NULL; + } + + rport_scsi_id_image->max_scsi_id = 0; + v_lport->destroy_step = UNF_LPORT_DESTROY_STEP_10_DESTROY_SCSI_TABLE; +} + +static unsigned int unf_lport_init( + struct unf_lport_s *v_lport, + void *private_data, + struct unf_low_level_function_op_s *low_level_op) +{ + unsigned int ret = RETURN_OK; + int ret_value = RETURN_ERROR_S32; + char work_queue_name[16]; + + unf_init_portparms(v_lport); + + /* Associating LPort with FCPort */ + v_lport->fc_port = private_data; + + /* VpIndx=0 is reserved for Lport, and rootLport points to its own */ + v_lport->vp_index = 0; + v_lport->root_lport = v_lport; + v_lport->chip_info = NULL; + + /* Initialize the units related to L_Port and lw func */ + ret = unf_lport_init_lw_fun_op(v_lport, low_level_op); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "LPort(0x%x) initialize lowlevel function unsuccessful.", + v_lport->port_id); + + return ret; + } + + /* Init Linkevent workqueue */ + ret_value = snprintf(work_queue_name, sizeof(work_queue_name), + "%x_lkq", (unsigned int)v_lport->port_id); + UNF_FUNCTION_RETURN_CHECK(ret_value, (int)sizeof(work_queue_name)); + + v_lport->link_event_wq = create_singlethread_workqueue(work_queue_name); + if (!v_lport->link_event_wq) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, 
UNF_LOG_NORMAL, UNF_ERR, + "[err]Port(0x%x) creat link event work queue failed", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + ret_value = snprintf(work_queue_name, sizeof(work_queue_name), + "%x_xchgwq", (unsigned int)v_lport->port_id); + UNF_FUNCTION_RETURN_CHECK(ret_value, (int)sizeof(work_queue_name)); + + v_lport->xchg_wq = create_workqueue(work_queue_name); + if (!v_lport->xchg_wq) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[err]Port(0x%x) creat Exchg work queue failed", + v_lport->port_id); + flush_workqueue(v_lport->link_event_wq); + destroy_workqueue(v_lport->link_event_wq); + v_lport->link_event_wq = NULL; + return UNF_RETURN_ERROR; + } + /* scsi table (R_Port) required for initializing INI + * Initialize the scsi id Table table to manage the + * mapping between SCSI ID, WWN, and Rport. + */ + ret = unf_init_scsi_id_table(v_lport); + if (ret != RETURN_OK) { + flush_workqueue(v_lport->link_event_wq); + destroy_workqueue(v_lport->link_event_wq); + v_lport->link_event_wq = NULL; + + flush_workqueue(v_lport->xchg_wq); + destroy_workqueue(v_lport->xchg_wq); + v_lport->xchg_wq = NULL; + return ret; + } + + /* Initialize the EXCH resource */ + ret = unf_alloc_xchg_resource(v_lport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "LPort(0x%x) can't allocate exchange resource.", + v_lport->port_id); + + flush_workqueue(v_lport->link_event_wq); + destroy_workqueue(v_lport->link_event_wq); + v_lport->link_event_wq = NULL; + + flush_workqueue(v_lport->xchg_wq); + destroy_workqueue(v_lport->xchg_wq); + v_lport->xchg_wq = NULL; + unf_destroy_scsi_id_table(v_lport); + + return ret; + } + + /* Initialize the ESGL resource pool used by Lport */ + ret = unf_init_esgl_pool(v_lport); + if (ret != RETURN_OK) { + flush_workqueue(v_lport->link_event_wq); + destroy_workqueue(v_lport->link_event_wq); + v_lport->link_event_wq = NULL; + + flush_workqueue(v_lport->xchg_wq); + destroy_workqueue(v_lport->xchg_wq); + v_lport->xchg_wq = NULL; + unf_free_all_xchg_mgr(v_lport); + unf_destroy_scsi_id_table(v_lport); + + return ret; + } + /* Initialize the disc manager under Lport */ + ret = unf_init_disc_mgr(v_lport); + if (ret != RETURN_OK) { + flush_workqueue(v_lport->link_event_wq); + destroy_workqueue(v_lport->link_event_wq); + v_lport->link_event_wq = NULL; + + flush_workqueue(v_lport->xchg_wq); + destroy_workqueue(v_lport->xchg_wq); + v_lport->xchg_wq = NULL; + unf_free_esgl_pool(v_lport); + unf_free_all_xchg_mgr(v_lport); + unf_destroy_scsi_id_table(v_lport); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "LPort(0x%x) initialize discover manager unsuccessful.", + v_lport->port_id); + + return ret; + } + + /* Initialize the LPort manager */ + ret = unf_init_lport_mgr_temp(v_lport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "LPort(0x%x) initialize RPort manager unsuccessful.", + v_lport->port_id); + + goto RELEASE_LPORT; + } + + /* Initialize the EXCH manager */ + ret = unf_init_xchg_mgr_temp(v_lport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "LPort(0x%x) initialize exchange manager unsuccessful.", + v_lport->port_id); + + goto RELEASE_LPORT; + } + /* Initialize the resources required by the event processing center */ + ret = unf_init_event_center(v_lport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "LPort(0x%x) initialize event center unsuccessful.", + 
v_lport->port_id); + + goto RELEASE_LPORT; + } + /* Initialize the initialization status of Lport */ + unf_set_lport_state(v_lport, UNF_LPORT_ST_INITIAL); + + /* Initialize the Lport route test case */ + ret = unf_init_lport_route(v_lport); + if (ret != RETURN_OK) { + flush_workqueue(v_lport->link_event_wq); + destroy_workqueue(v_lport->link_event_wq); + v_lport->link_event_wq = NULL; + + flush_workqueue(v_lport->xchg_wq); + destroy_workqueue(v_lport->xchg_wq); + v_lport->xchg_wq = NULL; + (void)unf_event_center_destroy(v_lport); + unf_disc_mgr_destroy(v_lport); + unf_free_esgl_pool(v_lport); + unf_free_all_xchg_mgr(v_lport); + unf_destroy_scsi_id_table(v_lport); + + return ret; + } + + /* Thesupports the initialization stepof the NPIV */ + ret = unf_init_vport_pool(v_lport); + if (ret != RETURN_OK) { + flush_workqueue(v_lport->link_event_wq); + destroy_workqueue(v_lport->link_event_wq); + v_lport->link_event_wq = NULL; + + flush_workqueue(v_lport->xchg_wq); + destroy_workqueue(v_lport->xchg_wq); + v_lport->xchg_wq = NULL; + + unf_destroy_lport_route(v_lport); + (void)unf_event_center_destroy(v_lport); + unf_disc_mgr_destroy(v_lport); + unf_free_esgl_pool(v_lport); + unf_free_all_xchg_mgr(v_lport); + unf_destroy_scsi_id_table(v_lport); + + return ret; + } + + /* qualifier rport callback */ + v_lport->pfn_unf_qualify_rport = unf_rport_set_qualifier_key_reuse; + v_lport->pfn_unf_tmf_abnormal_recovery = + unf_tmf_timeout_recovery_special; + return RETURN_OK; +RELEASE_LPORT: + flush_workqueue(v_lport->link_event_wq); + destroy_workqueue(v_lport->link_event_wq); + v_lport->link_event_wq = NULL; + + flush_workqueue(v_lport->xchg_wq); + destroy_workqueue(v_lport->xchg_wq); + v_lport->xchg_wq = NULL; + + unf_disc_mgr_destroy(v_lport); + unf_free_esgl_pool(v_lport); + unf_free_all_xchg_mgr(v_lport); + unf_destroy_scsi_id_table(v_lport); + return ret; +} + +static void unf_destroy_card_thread(struct unf_lport_s *v_lport) +{ + struct unf_event_mgr *event_mgr = NULL; + struct unf_chip_manage_info_s *chip_info = NULL; + struct list_head *list = NULL; + struct list_head *list_tmp = NULL; + struct unf_cm_event_report *event_node = NULL; + unsigned long event_lock_flag = 0; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x2249, UNF_TRUE, (v_lport), return); + + /* If the thread cannot be found, apply for a new thread. */ + chip_info = v_lport->chip_info; + if (!chip_info) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Port(0x%x) has no event thread.", v_lport->port_id); + return; + } + event_mgr = &v_lport->event_mgr; + + spin_lock_irqsave(&chip_info->chip_event_list_lock, flag); + if (!list_empty(&chip_info->list_head)) { + list_for_each_safe(list, list_tmp, &chip_info->list_head) { + event_node = list_entry(list, + struct unf_cm_event_report, + list_entry); + + /* The LPort under the global event node is null. */ + if (v_lport == event_node->lport) { + list_del_init(&event_node->list_entry); + if (event_node->event_asy_flag == + UNF_EVENT_SYN) { + event_node->result = UNF_RETURN_ERROR; + complete(&event_node->event_comp); + } + + spin_lock_irqsave(&event_mgr->port_event_lock, + event_lock_flag); + event_mgr->free_event_count++; + list_add_tail(&event_node->list_entry, + &event_mgr->list_free_event); + spin_unlock_irqrestore( + &event_mgr->port_event_lock, + event_lock_flag); + } + } + } + spin_unlock_irqrestore(&chip_info->chip_event_list_lock, flag); + + /* If the number of events introduced by the event thread is 0, + * it indicates that no interface is used. 
In this case, thread + * resources need to be consumed + */ + if (atomic_dec_and_test(&chip_info->ref_cnt)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Port(0x%x) destroy slot(%u) chip(0x%x) event thread succeed.", + v_lport->port_id, chip_info->slot_id, + chip_info->chip_id); + chip_info->b_thread_exit = UNF_TRUE; + wake_up_process(chip_info->data_thread); + kthread_stop(chip_info->data_thread); + chip_info->data_thread = NULL; + + spin_lock_irqsave(&card_thread_mgr.global_card_list_lock, flag); + list_del_init(&chip_info->list_chip_thread_entry); + card_thread_mgr.card_sum--; + spin_unlock_irqrestore(&card_thread_mgr.global_card_list_lock, + flag); + + vfree(chip_info); + } + + v_lport->chip_info = NULL; +} + +unsigned int unf_lport_deinit(struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x2246, UNF_TRUE, (v_lport), return UNF_RETURN_ERROR); + + /* If the card is unloaded normally, the thread is stopped once. + * The problem does not occur if you stop the thread again. + */ + unf_destroy_lport_route(v_lport); + + /* minus the reference count of the card event; + * the last port deletes the card thread + */ + unf_destroy_card_thread(v_lport); + flush_workqueue(v_lport->link_event_wq); + destroy_workqueue(v_lport->link_event_wq); + v_lport->link_event_wq = NULL; + + /* Release Event Processing Center */ + (void)unf_event_center_destroy(v_lport); + + /* Release the Vport resource pool */ + unf_free_vport_pool(v_lport); + + /* Destroying the Xchg Manager */ + unf_xchg_mgr_destroy(v_lport); + + /* Release Esgl pool */ + unf_free_esgl_pool(v_lport); + + /* reliability review :Disc should release after Xchg. + * Destroy the disc manager + */ + unf_disc_mgr_destroy(v_lport); + + /* Release Xchg Mg template */ + unf_release_xchg_mgr_temp(v_lport); + + /* Release the Lport Mg template */ + unf_release_lport_mgr_temp(v_lport); + + /* Destroy the ScsiId Table */ + unf_destroy_scsi_id_table(v_lport); + + flush_workqueue(v_lport->xchg_wq); + destroy_workqueue(v_lport->xchg_wq); + v_lport->xchg_wq = NULL; + + /* Deregister SCSI Host */ + unf_unregister_scsi_host(v_lport); + + /* Releasing the lw Interface Template */ + unf_lport_release_lw_fun_op(v_lport); + v_lport->fc_port = NULL; + return RETURN_OK; +} + +static int unf_card_event_process(void *v_arg) +{ + struct list_head *node = NULL; + struct unf_cm_event_report *event_node = NULL; + unsigned long flags = 0; + struct unf_chip_manage_info_s *chip_info = + (struct unf_chip_manage_info_s *)v_arg; + + UNF_REFERNCE_VAR(v_arg); + + set_user_nice(current, 4); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "Slot(%u) chip(0x%x) enter event thread.", + chip_info->slot_id, chip_info->chip_id); + + while (!kthread_should_stop()) { + if (chip_info->b_thread_exit == UNF_TRUE) + break; + + spin_lock_irqsave(&chip_info->chip_event_list_lock, flags); + if (list_empty(&chip_info->list_head) == UNF_TRUE) { + spin_unlock_irqrestore(&chip_info->chip_event_list_lock, + flags); + + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout((long)msecs_to_jiffies(1000)); + } else { + node = (&chip_info->list_head)->next; + list_del_init(node); + chip_info->list_num--; + event_node = list_entry(node, + struct unf_cm_event_report, + list_entry); + spin_unlock_irqrestore(&chip_info->chip_event_list_lock, + flags); + unf_handle_event(event_node); + } + } + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EVENT, UNF_MAJOR, + "Slot(%u) chip(0x%x) exit event thread.", + chip_info->slot_id, chip_info->chip_id); + + return RETURN_OK; +} + 
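For reference, the per-card event thread above follows a common kernel consumer pattern: a spinlock-protected list feeds a kthread that pops one node at a time and sleeps briefly when the list is empty, while the producer wakes it after queuing. The sketch below (not part of this patch; all demo_* names are hypothetical) shows that same pattern in a self-contained form, mirroring what unf_card_event_process() does per chip and what unf_creat_chip_thread()/unf_destroy_card_thread() create and tear down around it.

/*
 * Illustrative sketch only — assumes a generic event node, not the
 * driver's struct unf_cm_event_report.
 */
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/printk.h>

struct demo_event {
	struct list_head entry;
	u32 code;
};

static LIST_HEAD(demo_event_list);
static DEFINE_SPINLOCK(demo_event_lock);
static struct task_struct *demo_thread;

static void demo_handle_event(struct demo_event *ev)
{
	pr_info("demo: handling event 0x%x\n", ev->code);
	kfree(ev);
}

static int demo_event_thread(void *arg)
{
	struct demo_event *ev;
	unsigned long flags;

	while (!kthread_should_stop()) {
		spin_lock_irqsave(&demo_event_lock, flags);
		if (list_empty(&demo_event_list)) {
			spin_unlock_irqrestore(&demo_event_lock, flags);
			/* Nothing queued: sleep up to 1s, wake early on stop/post */
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(msecs_to_jiffies(1000));
			continue;
		}
		/* Pop exactly one node under the lock, handle it outside */
		ev = list_first_entry(&demo_event_list, struct demo_event, entry);
		list_del_init(&ev->entry);
		spin_unlock_irqrestore(&demo_event_lock, flags);

		demo_handle_event(ev);
	}
	return 0;
}

/* Producer side: queue an event and kick the consumer. */
static void demo_post_event(u32 code)
{
	struct demo_event *ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
	unsigned long flags;

	if (!ev)
		return;
	ev->code = code;
	spin_lock_irqsave(&demo_event_lock, flags);
	list_add_tail(&ev->entry, &demo_event_list);
	spin_unlock_irqrestore(&demo_event_lock, flags);
	wake_up_process(demo_thread);
}

static int demo_start(void)
{
	demo_thread = kthread_run(demo_event_thread, NULL, "demo_evt");
	return PTR_ERR_OR_ZERO(demo_thread);
}

As in the driver, the consumer polls with a bounded schedule_timeout() rather than a waitqueue, so a missed wakeup costs at most one sleep interval; the driver additionally gates thread teardown on an atomic reference count so the last port on a card is the one that stops the thread.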
+static unsigned int unf_creat_chip_thread(struct unf_lport_s *v_lport) +{ + unsigned long flag = 0; + struct unf_chip_manage_info_s *chip_info = NULL; + + UNF_CHECK_VALID(0x2250, UNF_TRUE, (v_lport), return UNF_RETURN_ERROR); + + /* If the thread cannot be found, apply for a new thread. */ + chip_info = (struct unf_chip_manage_info_s *)vmalloc( + sizeof(struct unf_chip_manage_info_s)); + if (!chip_info) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) cannot allocate thread memory.", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + memset(chip_info, 0, sizeof(struct unf_chip_manage_info_s)); + + memcpy(&chip_info->chip_info, &v_lport->low_level_func.chip_info, + sizeof(struct unf_chip_info_s)); + chip_info->slot_id = + UNF_GET_BOARD_TYPE_AND_SLOT_ID_BY_PORTID(v_lport->port_id); + chip_info->chip_id = v_lport->low_level_func.chip_id; + chip_info->list_num = 0; + chip_info->sfp_9545_fault = UNF_FALSE; + chip_info->sfp_power_fault = UNF_FALSE; + atomic_set(&chip_info->ref_cnt, 1); + atomic_set(&chip_info->card_loop_test_flag, UNF_FALSE); + spin_lock_init(&chip_info->card_loop_back_state_lock); + INIT_LIST_HEAD(&chip_info->list_head); + spin_lock_init(&chip_info->chip_event_list_lock); + + chip_info->b_thread_exit = UNF_FALSE; + chip_info->data_thread = + kthread_create(unf_card_event_process, chip_info, + "%x_et", v_lport->port_id); + + if (IS_ERR(chip_info->data_thread) || + (!chip_info->data_thread)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Port(0x%x) creat event thread(0x%p) unsuccessful.", + v_lport->port_id, chip_info->data_thread); + + vfree(chip_info); + + return UNF_RETURN_ERROR; + } + + v_lport->chip_info = chip_info; + wake_up_process(chip_info->data_thread); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "Port(0x%x) creat slot(%u) chip(0x%x) event thread succeed.", + v_lport->port_id, chip_info->slot_id, chip_info->chip_id); + + spin_lock_irqsave(&card_thread_mgr.global_card_list_lock, flag); + list_add_tail(&chip_info->list_chip_thread_entry, + &card_thread_mgr.list_card_list_head); + card_thread_mgr.card_sum++; + spin_unlock_irqrestore(&card_thread_mgr.global_card_list_lock, flag); + + return RETURN_OK; +} + +static unsigned int unf_find_chip_thread(struct unf_lport_s *v_lport) +{ + unsigned long flag = 0; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + struct unf_chip_manage_info_s *chip_info = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + spin_lock_irqsave(&card_thread_mgr.global_card_list_lock, flag); + list_for_each_safe(node, next_node, + &card_thread_mgr.list_card_list_head) { + chip_info = list_entry(node, struct unf_chip_manage_info_s, + list_chip_thread_entry); + + if ((chip_info->chip_id == v_lport->low_level_func.chip_id) && + (chip_info->slot_id == UNF_GET_BOARD_TYPE_AND_SLOT_ID_BY_PORTID(v_lport->port_id))) { + atomic_inc(&chip_info->ref_cnt); + v_lport->chip_info = chip_info; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, + UNF_MAJOR, + "Port(0x%x) find card(%u) chip(0x%x) event thread succeed.", + v_lport->port_id, chip_info->slot_id, + chip_info->chip_id); + + spin_unlock_irqrestore( + &card_thread_mgr.global_card_list_lock, flag); + + return RETURN_OK; + } + } + spin_unlock_irqrestore(&card_thread_mgr.global_card_list_lock, flag); + + ret = unf_creat_chip_thread(v_lport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "LPort(0x%x) creat event thread unsuccessful. 
Destroy LPort.", + v_lport->port_id); + return UNF_RETURN_ERROR; + } else { + return RETURN_OK; + } +} + +static int unf_cm_get_mac_adr(void *argc_in, void *argc_out) +{ + struct unf_lport_s *lport = NULL; + struct unf_get_chip_info_argout *chp_info = NULL; + + UNF_CHECK_VALID(0x2398, UNF_TRUE, argc_in, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x2398, UNF_TRUE, argc_out, return UNF_RETURN_ERROR); + + lport = (struct unf_lport_s *)argc_in; + chp_info = (struct unf_get_chip_info_argout *)argc_out; + + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + " LPort is null."); + + return UNF_RETURN_ERROR; + } + + if (!lport->low_level_func.port_mgr_op.pfn_ll_port_config_get) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Port(0x%x)'s corresponding function is NULL.", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + if (lport->low_level_func.port_mgr_op.pfn_ll_port_config_get( + lport->fc_port, + UNF_PORT_CFG_GET_MAC_ADDR, chp_info) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Port(0x%x) get .", lport->port_id); + + return UNF_RETURN_ERROR; + } + return RETURN_OK; +} + +static unsigned int unf_build_lport_wwn(struct unf_lport_s *v_lport) +{ + struct unf_get_chip_info_argout v_wwn = { 0 }; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x2403, UNF_TRUE, (v_lport), return UNF_RETURN_ERROR); + + ret = (unsigned int)unf_send_event(v_lport->port_id, + UNF_EVENT_SYN, + (void *)v_lport, + (void *)&v_wwn, + unf_cm_get_mac_adr); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "UNF_BuildSysWwn SendEvent(UNF_PortGetMacAdr) fail."); + + return UNF_RETURN_ERROR; + } + + /* save card mode: UNF_FC_SERVER_BOARD_32_G(6):32G; + * UNF_FC_SERVER_BOARD_16_G(7):16G MODE + */ + v_lport->card_type = v_wwn.board_type; + + /* update port max speed */ + if (v_wwn.board_type == UNF_FC_SERVER_BOARD_32_G) + v_lport->low_level_func.fc_ser_max_speed = UNF_PORT_SPEED_32_G; + else if (v_wwn.board_type == UNF_FC_SERVER_BOARD_16_G) + v_lport->low_level_func.fc_ser_max_speed = UNF_PORT_SPEED_16_G; + else if (v_wwn.board_type == UNF_FC_SERVER_BOARD_8_G) + v_lport->low_level_func.fc_ser_max_speed = UNF_PORT_SPEED_8_G; + else + v_lport->low_level_func.fc_ser_max_speed = UNF_PORT_SPEED_32_G; + + return RETURN_OK; +} + +void *unf_lport_create_and_init( + void *private_data, + struct unf_low_level_function_op_s *low_level_op) +{ + struct unf_lport_s *lport = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + if (!private_data) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Private Data is NULL"); + + return NULL; + } + if (!low_level_op) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "LowLevel port(0x%p) function is NULL", private_data); + + return NULL; + } + + /* 1. vmalloc & Memset L_Port */ + lport = vmalloc(sizeof(struct unf_lport_s)); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Alloc LPort memory failed."); + + return NULL; + } + memset(lport, 0, sizeof(struct unf_lport_s)); + + /* 2. L_Port Init */ + if (unf_lport_init(lport, private_data, low_level_op) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "LPort initialize unsuccessful."); + + vfree(lport); + + return NULL; + } + + /* 4. 
Get or Create Chip Thread Chip_ID & Slot_ID */ + ret = unf_find_chip_thread(lport); + if (ret != RETURN_OK) { + (void)unf_lport_deinit(lport); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "LPort(0x%x) Find Chip thread unsuccessful. Destroy LPort.", + lport->port_id); + + vfree(lport); + return NULL; + } + + /* 5. Registers with in the port management global linked list */ + unf_port_register(lport); + /* update WWN */ + if (unf_build_lport_wwn(lport) != RETURN_OK) { + unf_port_unregister(lport); + (void)unf_lport_deinit(lport); + vfree(lport); + return NULL; + } + + unf_init_link_lose_tmo(lport); + + /* initialize Scsi Host */ + if (unf_register_scsi_host(lport) != RETURN_OK) { + unf_port_unregister(lport); + (void)unf_lport_deinit(lport); + vfree(lport); + return NULL; + } + + /* 7. Here, start work now */ + if (global_lport_mgr.b_start_work == UNF_TRUE) { + if (unf_port_start_work(lport) != RETURN_OK) { + unf_port_unregister(lport); + + (void)unf_lport_deinit(lport); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[warn]Port(0x%x) start work failed", + lport->port_id); + vfree(lport); + return NULL; + } + } + + UNF_REFERNCE_VAR(lport); + return lport; +} + +static int unf_lport_destroy(void *v_lport, void *v_arg_out) +{ + struct unf_lport_s *lport = NULL; + unsigned long flags = 0; + + if (!v_lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "LPort is NULL."); + + return UNF_RETURN_ERROR; + } + + UNF_REFERNCE_VAR(v_arg_out); + + lport = (struct unf_lport_s *)v_lport; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR, + "Destroy LPort(0x%p), ID(0x%x).", + lport, lport->port_id); + + /* NPIV Ensure that all Vport are deleted */ + unf_destroy_all_vports(lport); + + lport->destroy_step = UNF_LPORT_DESTROY_STEP_1_REPORT_PORT_OUT; + + (void)unf_lport_deinit(v_lport); + + /* The port is removed from the destroy linked list. 
+ * The next step is to release the memory + */ + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); + list_del(&lport->entry_lport); + + /* If the port has dirty memory, the port is mounted to the + * linked list of dirty ports + */ + if (lport->dirty_flag) + list_add_tail(&lport->entry_lport, + &global_lport_mgr.list_dirty_head); + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, + flags); + + if (lport->lport_free_completion) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Complete LPort(0x%p), port ID(0x%x)'s Free Completion.", + lport, lport->port_id); + complete(lport->lport_free_completion); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "LPort(0x%p), port ID(0x%x)'s Free Completion is NULL.", + lport, lport->port_id); + dump_stack(); + } + + return RETURN_OK; +} + +unsigned int unf_lport_refinc(struct unf_lport_s *v_lport) +{ + unsigned long lport_flags = 0; + + UNF_CHECK_VALID(0x2208, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + spin_lock_irqsave(&v_lport->lport_state_lock, lport_flags); + if (atomic_read(&v_lport->lport_ref_cnt) <= 0) { + spin_unlock_irqrestore(&v_lport->lport_state_lock, + lport_flags); + + return UNF_RETURN_ERROR; + } + + atomic_inc(&v_lport->lport_ref_cnt); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%p) port_id(0x%x) reference count is %d", + v_lport, v_lport->port_id, + atomic_read(&v_lport->lport_ref_cnt)); + + spin_unlock_irqrestore(&v_lport->lport_state_lock, lport_flags); + + return RETURN_OK; +} + +void unf_lport_ref_dec(struct unf_lport_s *v_lport) +{ + unsigned long flags = 0; + unsigned long lport_flags = 0; + + UNF_CHECK_VALID(0x2209, UNF_TRUE, v_lport, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "LPort(0x%p), port ID(0x%x), reference count is %d.", + v_lport, v_lport->port_id, + atomic_read(&v_lport->lport_ref_cnt)); + + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); + spin_lock_irqsave(&v_lport->lport_state_lock, lport_flags); + if (atomic_dec_and_test(&v_lport->lport_ref_cnt)) { + spin_unlock_irqrestore(&v_lport->lport_state_lock, lport_flags); + list_del(&v_lport->entry_lport); + global_lport_mgr.lport_sum--; + + /* attaches the lport to the destroy linked list for dfx */ + list_add_tail(&v_lport->entry_lport, + &global_lport_mgr.list_destroy_head); + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, + flags); + + (void)unf_lport_destroy(v_lport, NULL); + } else { + spin_unlock_irqrestore(&v_lport->lport_state_lock, lport_flags); + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, + flags); + } +} + +static int unf_reset_port(void *v_arg_in, void *v_arg_out) +{ + struct unf_reset_port_argin *arg_in = + (struct unf_reset_port_argin *)v_arg_in; + struct unf_lport_s *lport = NULL; + unsigned int ret = UNF_RETURN_ERROR; + enum unf_port_config_state_e port_state = UNF_PORT_CONFIG_STATE_RESET; + + UNF_REFERNCE_VAR(v_arg_out); + UNF_CHECK_VALID(0x2262, UNF_TRUE, arg_in, return UNF_RETURN_ERROR); + + lport = unf_find_lport_by_port_id(arg_in->port_id); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Not find LPort(0x%x).", arg_in->port_id); + + return UNF_RETURN_ERROR; + } + + /* reset port */ + if (!lport->low_level_func.port_mgr_op.pfn_ll_port_config_set) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Port(0x%x)'s corresponding function is NULL.", + lport->port_id); + + return 
UNF_RETURN_ERROR; + } + + lport->en_act_topo = UNF_ACT_TOP_UNKNOWN; + lport->speed = UNF_PORT_SPEED_UNKNOWN; + lport->fabric_node_name = 0; + + ret = lport->low_level_func.port_mgr_op.pfn_ll_port_config_set( + lport->fc_port, + UNF_PORT_CFG_SET_PORT_STATE, (void *)&port_state); + + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Reset port(0x%x) unsuccessful.", lport->port_id); + + return UNF_RETURN_ERROR; + } + + return RETURN_OK; +} + +static int unf_sfp_switch(unsigned int v_port_id, int v_turn_on) +{ + struct unf_lport_s *lport = NULL; + int turn_on = v_turn_on; + int ret = UNF_RETURN_ERROR; + unsigned long flag = 0; + + if (global_lport_mgr.b_start_work == UNF_FALSE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_WARN, + "[warn]Port(0x%x) not start work, ignored command:turn %s.", + v_port_id, (v_turn_on == UNF_TRUE) ? "ON" : "OFF"); + + return RETURN_OK; + } + + lport = unf_find_lport_by_port_id(v_port_id); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_WARN, + "[warn]Not find LPort(0x%x).", v_port_id); + + return UNF_RETURN_ERROR; + } + + spin_lock_irqsave(&lport->lport_state_lock, flag); + if (lport->en_start_work_state != UNF_START_WORK_COMPLETE) { + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_WARN, + "[warn]LPort(0x%x) not start work, ignored command:turn %s.", + v_port_id, (v_turn_on == UNF_TRUE) ? "ON" : "OFF"); + + return RETURN_OK; + } + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + if (!lport->low_level_func.port_mgr_op.pfn_ll_port_config_set) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_WARN, + "[warn]Port(0x%x)'s corresponding function is NULL.", + v_port_id); + + return UNF_RETURN_ERROR; + } + + ret = (int)lport->low_level_func.port_mgr_op.pfn_ll_port_config_set( + lport->fc_port, + UNF_PORT_CFG_SET_SFP_SWITCH, + (void *)&turn_on); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_WARN, + "[warn]Port(0x%x) switch SFP+ %s unsuccessful.", + v_port_id, v_turn_on ? 
"On" : "Off"); + + return UNF_RETURN_ERROR; + } + + lport->b_switch_state = (enum int_e)turn_on; + + return RETURN_OK; +} + +static int unf_sfp_switch_event(void *v_argc_in, void *v_argc_out) +{ + struct unf_set_sfp_argin *in = (struct unf_set_sfp_argin *)v_argc_in; + + UNF_REFERNCE_VAR(v_argc_out); + UNF_CHECK_VALID(0x2267, UNF_TRUE, v_argc_in, return UNF_RETURN_ERROR); + + return unf_sfp_switch(in->port_id, in->turn_on); +} + +int unf_cm_sfp_switch(unsigned int v_port_id, int v_bturn_on) +{ + struct unf_set_sfp_argin in = { 0 }; + + in.port_id = v_port_id; + in.turn_on = v_bturn_on; + return unf_send_event(v_port_id, UNF_EVENT_SYN, (void *)&in, + (void *)NULL, unf_sfp_switch_event); +} + +static int unf_get_port_speed(void *v_argc_in, void *v_argc_out) +{ + unsigned int *speed = (unsigned int *)v_argc_out; + struct unf_low_level_port_mgr_op_s *port_mgr = NULL; + struct unf_lport_s *lport = NULL; + int ret = 0; + unsigned int port_id = *(unsigned int *)v_argc_in; + + UNF_CHECK_VALID(0x2268, UNF_TRUE, v_argc_in, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x2269, UNF_TRUE, v_argc_out, return UNF_RETURN_ERROR); + lport = unf_find_lport_by_port_id(port_id); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Cannot Find LPort by (0x%x).", port_id); + + return UNF_RETURN_ERROR; + } + + port_mgr = &lport->low_level_func.port_mgr_op; + + if (!port_mgr->pfn_ll_port_config_get) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Port(0x%x)'s corresponding function is NULL.", + port_id); + + return UNF_RETURN_ERROR; + } + + if (lport->link_up == UNF_PORT_LINK_UP) + ret = (int)port_mgr->pfn_ll_port_config_get(lport->fc_port, + UNF_PORT_CFG_GET_SPEED_ACT, (void *)speed); + else + *speed = UNF_PORT_SPEED_UNKNOWN; + + return ret; +} + +static int unf_cm_get_port_speed(unsigned int v_port_id, unsigned int *v_speed) +{ + UNF_CHECK_VALID(0x2270, UNF_TRUE, v_speed, return UNF_RETURN_ERROR); + + return unf_send_event(v_port_id, UNF_EVENT_SYN, (void *)&v_port_id, + (void *)v_speed, unf_get_port_speed); +} + +static int unf_set_port_speed(void *v_argc_in, void *v_argc_out) +{ + unsigned int ret = RETURN_OK; + struct unf_set_speed_argin *in = + (struct unf_set_speed_argin *)v_argc_in; + struct unf_lport_s *lport = NULL; + + UNF_REFERNCE_VAR(v_argc_out); + UNF_CHECK_VALID(0x2271, UNF_TRUE, v_argc_in, return UNF_RETURN_ERROR); + lport = unf_find_lport_by_port_id(in->port_id); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Cannot Find LPort by (0x%x).", in->port_id); + + return UNF_RETURN_ERROR; + } + + if (!lport->low_level_func.port_mgr_op.pfn_ll_port_config_set) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Port(0x%x)'s corresponding function is NULL.", + in->port_id); + + return UNF_RETURN_ERROR; + } + + ret = lport->low_level_func.port_mgr_op.pfn_ll_port_config_set( + lport->fc_port, + UNF_PORT_CFG_SET_SPEED, (void *)in->speed); + + return (int)ret; +} + +int unf_cm_set_port_speed(unsigned int v_port_id, unsigned int *v_speed) +{ + struct unf_set_speed_argin in = { 0 }; + + in.port_id = v_port_id; + in.speed = v_speed; + return unf_send_event(v_port_id, UNF_EVENT_SYN, (void *)&in, + (void *)NULL, unf_set_port_speed); +} + +static int unf_get_port_topo(void *argc_in, void *argc_out) +{ + struct unf_lport_s *lport = NULL; + struct unf_get_topo_argout *out = NULL; + struct unf_low_level_port_mgr_op_s *port_mgr = NULL; + int ret = UNF_TRUE; + unsigned int port_id = 0; + + UNF_CHECK_VALID(0x2283, 
UNF_TRUE, argc_in, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x2284, UNF_TRUE, argc_out, return UNF_RETURN_ERROR); + port_id = *(unsigned int *)argc_in; + out = (struct unf_get_topo_argout *)argc_out; + + lport = unf_find_lport_by_port_id(port_id); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Not find LPort(0x%x).", port_id); + + return UNF_RETURN_ERROR; + } + + port_mgr = &lport->low_level_func.port_mgr_op; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + port_mgr->pfn_ll_port_config_get, + return UNF_RETURN_ERROR); + + if (lport->link_up == UNF_PORT_LINK_UP) { + ret = (int)port_mgr->pfn_ll_port_config_get(lport->fc_port, + UNF_PORT_CFG_GET_TOPO_ACT, (void *)out->en_act_topo); + if (ret != RETURN_OK) + return ret; + + } else { + *out->en_act_topo = UNF_ACT_TOP_UNKNOWN; + } + + ret = (int)port_mgr->pfn_ll_port_config_get(lport->fc_port, + UNF_PORT_CFG_GET_TOPO_CFG, (void *)out->topo_cfg); + + return ret; +} + +int unf_cm_get_port_topo(unsigned int v_port_id, unsigned int *v_topo_cfg, + enum unf_act_topo_e *v_en_act_topo) +{ + struct unf_get_topo_argout out = { 0 }; + + UNF_CHECK_VALID(0x2286, UNF_TRUE, v_topo_cfg, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x2287, UNF_TRUE, v_en_act_topo, + return UNF_RETURN_ERROR); + + out.en_act_topo = v_en_act_topo; + out.topo_cfg = v_topo_cfg; + + return unf_send_event(v_port_id, UNF_EVENT_SYN, (void *)&v_port_id, + (void *)&out, unf_get_port_topo); +} + +static int unf_set_port_topo(void *argc_in, void *argc_out) +{ + struct unf_lport_s *lport = NULL; + struct unf_set_topo_argin *in = NULL; + enum int_e *b_arg_out = (enum int_e *)argc_out; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x2257, UNF_TRUE, argc_out, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x2288, UNF_TRUE, argc_in, return UNF_RETURN_ERROR); + in = (struct unf_set_topo_argin *)argc_in; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + (in->topo == UNF_TOP_LOOP_MASK) || + (in->topo == UNF_TOP_P2P_MASK) || + (in->topo == UNF_TOP_AUTO_MASK), + return UNF_RETURN_ERROR); + + lport = unf_find_lport_by_port_id(in->port_id); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Not find LPort(0x%x).", in->port_id); + + return UNF_RETURN_ERROR; + } + + UNF_CHECK_VALID( + INVALID_VALUE32, UNF_TRUE, + lport->low_level_func.port_mgr_op.pfn_ll_port_config_set, + return UNF_RETURN_ERROR); + + ret = lport->low_level_func.port_mgr_op.pfn_ll_port_config_set( + lport->fc_port, + UNF_PORT_CFG_SET_TOPO, (void *)&in->topo); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Can't set port topology."); + + return UNF_RETURN_ERROR; + } + + lport->low_level_func.lport_cfg_items.port_topology = in->topo; + *b_arg_out = lport->b_switch_state; + + return RETURN_OK; +} + +int unf_cm_set_port_topo(unsigned int v_port_id, unsigned int v_topo) +{ + struct unf_set_topo_argin in = { 0 }; + int ret = UNF_RETURN_ERROR; + enum int_e b_switch_state = UNF_FALSE; + + in.port_id = v_port_id; + in.topo = v_topo; + + ret = unf_send_event(v_port_id, UNF_EVENT_SYN, (void *)&in, + (void *)&b_switch_state, unf_set_port_topo); + + return ret; +} + +int unf_set_port_bbscn(void *argc_in, void *argc_out) +{ + struct unf_lport_s *lport = NULL; + struct unf_set_bbscn_argin *in = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_REFERNCE_VAR(argc_out); + UNF_CHECK_VALID(0x2300, UNF_TRUE, argc_in, return UNF_RETURN_ERROR); + in = (struct unf_set_bbscn_argin *)argc_in; + + lport = 
unf_find_lport_by_port_id(in->port_id); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Not find LPort(0x%x).", in->port_id); + + return UNF_RETURN_ERROR; + } + + UNF_CHECK_VALID( + INVALID_VALUE32, UNF_TRUE, + lport->low_level_func.port_mgr_op.pfn_ll_port_config_set, + return UNF_RETURN_ERROR); + + ret = lport->low_level_func.port_mgr_op.pfn_ll_port_config_set( + lport->fc_port, + UNF_PORT_CFG_SET_BBSCN, (void *)&in->bb_scn); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Cannot set port BB_SC_N."); + + return UNF_RETURN_ERROR; + } + + /* update bbsn cfg to Lport */ + lport->low_level_func.lport_cfg_items.bb_scn = in->bb_scn; + + return RETURN_OK; +} + +int unf_cm_set_port_bbscn(unsigned int v_port_id, unsigned int v_bbscn) +{ + struct unf_set_bbscn_argin in = { 0 }; + + in.port_id = v_port_id; + in.bb_scn = v_bbscn; + + return unf_send_event(v_port_id, UNF_EVENT_SYN, (void *)&in, + (void *)NULL, unf_set_port_bbscn); +} + +unsigned int unf_get_error_code_sum(struct unf_lport_s *v_lport, + struct unf_err_code_s *v_fc_err_code) +{ + struct unf_low_level_port_mgr_op_s *port_mgr = NULL; + struct unf_lport_s *lport = v_lport; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_err_code_s fc_err_code; + + UNF_CHECK_VALID(0x2328, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x2329, UNF_TRUE, v_fc_err_code, + return UNF_RETURN_ERROR); + + memset(&fc_err_code, 0, sizeof(struct unf_err_code_s)); + + port_mgr = &lport->low_level_func.port_mgr_op; + if (!port_mgr->pfn_ll_port_config_get) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Port(0x%x)'s corresponding function is NULL.", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + ret = port_mgr->pfn_ll_port_config_get((void *)lport->fc_port, + UNF_PORT_CFG_GET_LESB_THEN_CLR, (void *)&fc_err_code); + if (ret != RETURN_OK) + return UNF_RETURN_ERROR; + + if (lport->link_up != UNF_PORT_LINK_UP) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_INFO, + "LPort(0x%x) is not link up.", lport->port_id); + memcpy(v_fc_err_code, &lport->err_code_sum, + sizeof(struct unf_err_code_s)); + + return RETURN_OK; + } + + lport->err_code_sum.bad_rx_char_count += fc_err_code.bad_rx_char_count; + lport->err_code_sum.link_fail_count += fc_err_code.link_fail_count; + lport->err_code_sum.loss_of_signal_count += + fc_err_code.loss_of_signal_count; + lport->err_code_sum.loss_of_sync_count += + fc_err_code.loss_of_sync_count; + lport->err_code_sum.proto_error_count += fc_err_code.proto_error_count; + + lport->err_code_sum.rx_eo_fa_count = fc_err_code.rx_eo_fa_count; + lport->err_code_sum.dis_frame_count = fc_err_code.dis_frame_count; + lport->err_code_sum.bad_crc_count = fc_err_code.bad_crc_count; + + memcpy(v_fc_err_code, &lport->err_code_sum, + sizeof(struct unf_err_code_s)); + + return RETURN_OK; +} + +static int unf_clear_port_error_code_sum(void *argc_in, void *argc_out) +{ + struct unf_lport_s *lport = NULL; + unsigned int port_id = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x2331, UNF_TRUE, argc_in, return UNF_RETURN_ERROR); + UNF_REFERNCE_VAR(argc_out); + + port_id = *(unsigned int *)argc_in; + lport = unf_find_lport_by_port_id(port_id); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Cannot find LPort(0x%x).", port_id); + + return UNF_RETURN_ERROR; + } + + if (!lport->low_level_func.port_mgr_op.pfn_ll_port_config_get) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, 
UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Port(0x%x)'s corresponding function is NULL.", + port_id); + + return UNF_RETURN_ERROR; + } + + ret = lport->low_level_func.port_mgr_op.pfn_ll_port_config_get( + (void *)lport->fc_port, + UNF_PORT_CFG_CLR_LESB, NULL); + if (ret != RETURN_OK) + return UNF_RETURN_ERROR; + + memset(&lport->err_code_sum, 0, sizeof(struct unf_err_code_s)); + + return RETURN_OK; +} + +int unf_cm_clear_port_error_code_sum(unsigned int v_port_id) +{ + return unf_send_event(v_port_id, UNF_EVENT_SYN, (void *)&v_port_id, + (void *)NULL, unf_clear_port_error_code_sum); +} + +static int unf_update_lport_sfp_info(struct unf_lport_s *v_lport, + enum unf_port_config_get_op_e v_type) +{ + struct unf_lport_s *lport = NULL; + int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x2332, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + lport = v_lport; + + if (!lport->low_level_func.port_mgr_op.pfn_ll_port_config_get) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Port(0x%x)'s corresponding function is NULL.", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + ret = (int)(lport->low_level_func.port_mgr_op.pfn_ll_port_config_get( + (void *)lport->fc_port, + v_type, (void *)&lport->sfp_info)); + + return ret; +} + +static int unf_translate_sfp_status(struct unf_lport_s *v_lport, + struct unf_get_sfp_argout *v_out) +{ + struct unf_lport_s *lport = v_lport; + int ret = UNF_RETURN_ERROR; + + switch (lport->sfp_info.status) { + case UNF_SFP_PRESENT_FAIL: + *v_out->status = DRV_CABLE_CONNECTOR_NONE; + ret = RETURN_OK; + break; + case UNF_SFP_POWER_FAIL: + *v_out->status = DRV_CABLE_CONNECTOR_INVALID; + ret = RETURN_OK; + break; + case UNF_9545_FAIL: + *v_out->status = DRV_CABLE_CONNECTOR_INVALID; + ret = RETURN_OK; + break; + default: + *v_out->status = DRV_CABLE_CONNECTOR_BUTT; + ret = UNF_RETURN_ERROR; + break; + } + + return ret; +} + +static void unf_record_chip_fault(struct unf_lport_s *v_lport) +{ +#define UNF_CHIP_FAULT_MAX_CHECK_TIME 3 + + if (v_lport->sfp_info.status == UNF_9545_FAIL) { + /* If there are 9545 fault,explain that the sfp is power on, + * and reset sfp_power_fault_count + */ + v_lport->sfp_power_fault_count = 0; + + if (v_lport->sfp_9545_fault_count < + UNF_CHIP_FAULT_MAX_CHECK_TIME) { + v_lport->sfp_9545_fault_count++; + } else { + v_lport->chip_info->sfp_9545_fault = UNF_TRUE; + v_lport->sfp_9545_fault_count = 0; + } + } else if (v_lport->sfp_info.status == UNF_SFP_POWER_FAIL) { + if (v_lport->sfp_power_fault_count < + UNF_CHIP_FAULT_MAX_CHECK_TIME) { + v_lport->sfp_power_fault_count++; + } else { + v_lport->chip_info->sfp_power_fault = UNF_TRUE; + v_lport->sfp_power_fault_count = 0; + } + } +} + +int unf_check_sfp_tx_fault(struct unf_lport_s *v_lport, + struct unf_sfp_info_s *v_sfp_info) +{ + /* 24 hours ms(24*60*60*1000) */ +#define UNF_SFP_TXFALT_RECOVER_INTERVEL 86400000 + + struct unf_sfp_info_s *sfp_info = NULL; + struct unf_lport_s *lport = NULL; + + sfp_info = v_sfp_info; + lport = v_lport; + + if (sfp_info->sfp_info_a2.diag.status_ctrl.tx_fault_state == 0) + return RETURN_OK; + + /* Repair conditions: + * 1 port linkdown; + * 2 from the last repair more than 24 hours; + * 3 sfp is on + */ + if ((lport->link_up == UNF_PORT_LINK_DOWN) && + (lport->b_switch_state) && + ((lport->last_tx_fault_jif == 0) || + (jiffies_to_msecs(jiffies - lport->last_tx_fault_jif) > + UNF_SFP_TXFALT_RECOVER_INTERVEL))) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "LPort(0x%x) stat(0x%x) jiff(%ld) lastjiff(%llu) Ctrl(0x%x) TxFault set 1.", + 
lport->port_id, lport->link_up, jiffies, + lport->last_tx_fault_jif, + *((unsigned char *) + &sfp_info->sfp_info_a2.diag.status_ctrl)); + + lport->last_tx_fault_jif = jiffies; + (void)unf_sfp_switch(lport->port_id, UNF_FALSE); + msleep(100); + + /* Around quickly switch port FW state error problem */ + (void)unf_sfp_switch(lport->port_id, UNF_TRUE); + + return UNF_RETURN_ERROR; + } + + return RETURN_OK; +} + +static int unf_get_sfp_info(void *argc_in, void *argc_out) +{ + struct unf_lport_s *lport = NULL; + struct unf_get_sfp_argout *out = NULL; + unsigned int port_id = 0; + int ret = RETURN_OK; + + UNF_CHECK_VALID(0x2333, UNF_TRUE, argc_in, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x2334, UNF_TRUE, argc_out, return UNF_RETURN_ERROR); + + port_id = *(unsigned int *)argc_in; + out = (struct unf_get_sfp_argout *)argc_out; + lport = unf_find_lport_by_port_id(port_id); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Cannot find LPort(0x%x).", port_id); + + return UNF_RETURN_ERROR; + } + + lport->sfp_info.status = 0; + + ret = unf_update_lport_sfp_info(lport, UNF_PORT_CFG_GET_SFP_INFO); + + if (ret == RETURN_OK) { + lport->sfp_power_fault_count = 0; + lport->sfp_9545_fault_count = 0; + *out->status = DRV_CABLE_CONNECTOR_OPTICAL; + if (unf_check_sfp_tx_fault( + lport, + &lport->sfp_info.sfp_eeprom_info.sfp_info) == + UNF_RETURN_ERROR) { + return UNF_RETURN_ERROR; + } + + memcpy(out->sfp_info, &lport->sfp_info.sfp_eeprom_info, + sizeof(union unf_sfp_eeprome_info)); + ret = RETURN_OK; + } else { + ret = unf_translate_sfp_status(lport, out); + + unf_record_chip_fault(lport); + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "Port(0x%x)'s getsfpinfo fail, sfp status(0x%x).", + lport->port_id, lport->sfp_info.status); + } + + return ret; +} + +int unf_cm_get_sfp_info(unsigned int v_port_id, unsigned int *v_status, + union unf_sfp_eeprome_info *v_sfp_info, + unsigned int *sfp_type) +{ + struct unf_lport_s *lport = NULL; + struct unf_get_sfp_argout out = { 0 }; + + lport = unf_find_lport_by_port_id(v_port_id); + if (!lport) + return UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x2335, UNF_TRUE, v_status, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x2336, UNF_TRUE, v_sfp_info, return UNF_RETURN_ERROR); + + out.status = v_status; + out.sfp_info = v_sfp_info; + + if (global_lport_mgr.b_start_work == UNF_FALSE) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "Port(0x%x) have not start work, return.", v_port_id); + return UNF_RETURN_ERROR; + } + + *sfp_type = lport->low_level_func.sfp_type; + return unf_send_event(v_port_id, UNF_EVENT_SYN, (void *)&v_port_id, + (void *)&out, unf_get_sfp_info); +} + +int unf_cm_reset_port(unsigned int v_port_id) +{ + int ret = UNF_RETURN_ERROR; + + ret = unf_send_event(v_port_id, UNF_EVENT_SYN, (void *)&v_port_id, + (void *)NULL, unf_reset_port); + return ret; +} + +int unf_lport_reset_port(struct unf_lport_s *v_lport, unsigned int v_flag) +{ + UNF_CHECK_VALID(0x2352, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + return unf_send_event(v_lport->port_id, v_flag, + (void *)&v_lport->port_id, + (void *)NULL, + unf_reset_port); +} + +static inline unsigned int unf_get_loop_alpa(struct unf_lport_s *v_lport, + void *v_loop_alpa) +{ + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x2357, UNF_TRUE, + v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get, + return UNF_RETURN_ERROR); + + ret = v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get( + v_lport->fc_port, + UNF_PORT_CFG_GET_LOOP_ALPA, 
v_loop_alpa); + return ret; +} + +static unsigned int unf_lport_enter_private_loop_login( + struct unf_lport_s *v_lport) +{ + struct unf_lport_s *lport = v_lport; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x2358, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + spin_lock_irqsave(&lport->lport_state_lock, flag); + unf_lport_stat_ma(lport, UNF_EVENT_LPORT_READY); + /* LPort: LINK_UP --> READY */ + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + unf_lport_update_topo(lport, UNF_ACT_TOP_PRIVATE_LOOP); + + /* NOP: check L_Port state */ + if (atomic_read(&lport->port_no_operater_flag) == UNF_LPORT_NOP) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) is NOP, do nothing", + lport->port_id); + + return RETURN_OK; + } + + /* INI: check L_Port mode */ + if ((lport->options & UNF_PORT_MODE_INI) != UNF_PORT_MODE_INI) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) has no INI feature(0x%x), do nothing", + lport->port_id, lport->options); + + return RETURN_OK; + } + + if (lport->disc.unf_disc_temp.pfn_unf_disc_start) { + ret = lport->disc.unf_disc_temp.pfn_unf_disc_start(lport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) with nportid(0x%x) start discovery failed", + lport->port_id, lport->nport_id); + } + } + + return ret; +} + +unsigned int unf_lport_login(struct unf_lport_s *v_lport, + enum unf_act_topo_e v_en_act_topo) +{ + unsigned int loop_alpa = 0; + unsigned int ret = RETURN_OK; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x2359, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + /* 1. Update (set) L_Port topo which get from low level */ + unf_lport_update_topo(v_lport, v_en_act_topo); + + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + + /* 2. Link state check */ + if (v_lport->link_up != UNF_PORT_LINK_UP) { + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) with link_state(0x%x) port_state(0x%x) when login", + v_lport->port_id, v_lport->link_up, + v_lport->en_states); + + return UNF_RETURN_ERROR; + } + + /* 3. Update L_Port state */ + unf_lport_stat_ma(v_lport, UNF_EVENT_LPORT_LINK_UP); + /* LPort: INITIAL --> LINK UP */ + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]LOGIN: Port(0x%x) start to login with topology(0x%x)", + v_lport->port_id, v_lport->en_act_topo); + + /* 4. 
Start logoin */ + if ((v_en_act_topo == UNF_TOP_P2P_MASK) || + (v_en_act_topo == UNF_ACT_TOP_P2P_FABRIC) || + (v_en_act_topo == UNF_ACT_TOP_P2P_DIRECT)) { + /* P2P or Fabric mode */ + ret = unf_lport_enter_flogi(v_lport); + } else if (v_en_act_topo == UNF_ACT_TOP_PUBLIC_LOOP) { + /* Public loop */ + (void)unf_get_loop_alpa(v_lport, &loop_alpa); + + /* Before FLOGI ALPA just low 8 bit after FLOGI ACC switch + * will assign complete addresses + */ + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + v_lport->nport_id = loop_alpa; + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + ret = unf_lport_enter_flogi(v_lport); + } else if (v_en_act_topo == UNF_ACT_TOP_PRIVATE_LOOP) { + /* Private loop */ + (void)unf_get_loop_alpa(v_lport, &loop_alpa); + + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + v_lport->nport_id = loop_alpa; + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + ret = unf_lport_enter_private_loop_login(v_lport); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]LOGIN: Port(0x%x) login with unknown topology(0x%x)", + v_lport->port_id, v_lport->en_act_topo); + } + + return ret; +} + +static unsigned int unf_port_link_up(struct unf_lport_s *v_lport, + void *v_in_put) +{ + struct unf_lport_s *lport = v_lport; + unsigned int ret = RETURN_OK; + enum unf_act_topo_e en_act_topo = UNF_ACT_TOP_UNKNOWN; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x2361, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_REFERNCE_VAR(v_in_put); + + /* If NOP state, stop */ + if (atomic_read(&lport->port_no_operater_flag) == UNF_LPORT_NOP) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[warn]Port(0x%x) is NOP and do nothing", + lport->port_id); + + return RETURN_OK; + } + + /* Update port state */ + spin_lock_irqsave(&lport->lport_state_lock, flag); + lport->link_up = UNF_PORT_LINK_UP; + lport->speed = *((unsigned int *)v_in_put); + unf_set_lport_state(v_lport, UNF_LPORT_ST_INITIAL); + /* INITIAL state */ + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + /* set hot pool wait state: so far, do not care */ + unf_set_hot_pool_wait_state(lport, UNF_TRUE); + + lport->enhanced_features |= UNF_LPORT_ENHANCED_FEATURE_READ_SFP_ONCE; + + /* Get port active topopolgy (from low level) */ + if (!lport->low_level_func.port_mgr_op.pfn_ll_port_config_get) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[warn]Port(0x%x) get topo function is NULL", + lport->port_id); + + return UNF_RETURN_ERROR; + } + ret = lport->low_level_func.port_mgr_op.pfn_ll_port_config_get( + lport->fc_port, + UNF_PORT_CFG_GET_TOPO_ACT, (void *)&en_act_topo); + + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[warn]Port(0x%x) get topo from low level failed", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + /* Start Login process */ + ret = unf_lport_login(lport, en_act_topo); + + unf_report_io_dm_event(lport, UNF_PORT_LINK_UP, 0); + return ret; +} + +static unsigned int unf_port_link_down(struct unf_lport_s *v_lport, + void *v_in_put) +{ + unsigned long flag = 0; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x2363, UNF_FALSE, v_lport, return UNF_RETURN_ERROR); + UNF_REFERNCE_VAR(v_in_put); + lport = v_lport; + unf_report_io_dm_event(lport, UNF_PORT_LINK_DOWN, 0); + + /* To prevent repeated reporting linkdown */ + spin_lock_irqsave(&lport->lport_state_lock, flag); + lport->speed = UNF_PORT_SPEED_UNKNOWN; + lport->en_act_topo = UNF_ACT_TOP_UNKNOWN; + if (lport->link_up == 
UNF_PORT_LINK_DOWN) { + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + return RETURN_OK; + } + unf_lport_stat_ma(lport, UNF_EVENT_LPORT_LINK_DOWN); + unf_reset_lport_params(lport); + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + unf_set_hot_pool_wait_state(lport, UNF_FALSE); + + /* + * clear I/O: + * 1. INI do ABORT only, + * for INI: busy/delay/delay_transfer/wait + * Clean L_Port/V_Port Link Down I/O: only set ABORT tag + */ + unf_flush_disc_event(&lport->disc, NULL); + + unf_clean_link_down_io(lport, UNF_FALSE); + + /* for L_Port's R_Ports */ + unf_clean_linkdown_rport(lport); + /* for L_Port's all Vports */ + unf_linkdown_all_vports(v_lport); + return RETURN_OK; +} + +static unsigned int unf_port_abnormal_reset(struct unf_lport_s *v_lport, + void *v_in_put) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x2363, UNF_FALSE, v_lport, return UNF_RETURN_ERROR); + + UNF_REFERNCE_VAR(v_in_put); + + lport = v_lport; + + ret = (unsigned int)unf_lport_reset_port(lport, UNF_EVENT_ASYN); + + return ret; +} + +static unsigned int unf_port_reset_start(struct unf_lport_s *v_lport, + void *v_in_put) +{ + unsigned int ret = RETURN_OK; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x2364, UNF_FALSE, v_lport, return UNF_RETURN_ERROR); + UNF_REFERNCE_VAR(v_in_put); + + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + unf_set_lport_state(v_lport, UNF_LPORT_ST_RESET); + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "Port(0x%x) begin to reset.", v_lport->port_id); + + return ret; +} + +static unsigned int unf_port_reset_end(struct unf_lport_s *v_lport, + void *v_in_put) +{ + unsigned long flag = 0; + + UNF_CHECK_VALID(0x2365, UNF_FALSE, v_lport, return UNF_RETURN_ERROR); + + UNF_REFERNCE_VAR(v_in_put); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "Port(0x%x) reset end.", v_lport->port_id); + + /* Task management command returns success and avoid + * repair measures case offline device + */ + unf_wakeup_scsi_task_cmnd(v_lport); + + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + unf_set_lport_state(v_lport, UNF_LPORT_ST_INITIAL); + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + return RETURN_OK; +} + +static unsigned int unf_port_nop(struct unf_lport_s *v_lport, void *v_in_put) +{ + struct unf_lport_s *lport = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x2366, UNF_FALSE, v_lport, return UNF_RETURN_ERROR); + UNF_REFERNCE_VAR(v_in_put); + lport = v_lport; + + atomic_set(&lport->port_no_operater_flag, UNF_LPORT_NOP); + + spin_lock_irqsave(&lport->lport_state_lock, flag); + unf_lport_stat_ma(lport, UNF_EVENT_LPORT_LINK_DOWN); + unf_reset_lport_params(lport); + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + /* Set Tag prevent pending I/O to wait_list when close sfp failed */ + unf_set_hot_pool_wait_state(lport, UNF_FALSE); + + unf_flush_disc_event(&lport->disc, NULL); + + /* L_Port/V_Port's I/O(s): Clean Link Down I/O: Set Abort Tag */ + unf_clean_link_down_io(lport, UNF_FALSE); + + /* L_Port/V_Port's R_Port(s): report link down event to + * scsi & clear resource + */ + unf_clean_linkdown_rport(lport); + unf_linkdown_all_vports(lport); + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) report NOP event done", + lport->nport_id); + + return RETURN_OK; +} + +static unsigned int unf_port_clean_done(struct unf_lport_s *v_lport, + void *v_in_put) +{ + 
UNF_CHECK_VALID(0x2691, UNF_FALSE, v_lport, return UNF_RETURN_ERROR); + + UNF_REFERNCE_VAR(v_in_put); + + /* when port reset,delte delete all Rport immediately, + * in order to remove immediately for resources + */ + unf_clean_linkdown_rport(v_lport); + + return RETURN_OK; +} + +static unsigned int unf_port_begin_remove(struct unf_lport_s *v_lport, + void *v_in_put) +{ + UNF_CHECK_VALID(0x2691, UNF_FALSE, v_lport, return UNF_RETURN_ERROR); + + UNF_REFERNCE_VAR(v_in_put); + + /* Cancel route timer delay work */ + unf_destroy_lport_route(v_lport); + + return RETURN_OK; +} + +static unsigned int unf_get_pcie_link_state(struct unf_lport_s *v_lport) +{ + struct unf_lport_s *lport = v_lport; + int link_state = UNF_TRUE; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x2257, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + UNF_CHECK_VALID( + INVALID_VALUE32, UNF_TRUE, + lport->low_level_func.port_mgr_op.pfn_ll_port_config_get, + return UNF_RETURN_ERROR); + + ret = lport->low_level_func.port_mgr_op.pfn_ll_port_config_get( + lport->fc_port, + UNF_PORT_CFG_GET_PCIE_LINK_STATE, (void *)&link_state); + if (ret != RETURN_OK || link_state != UNF_TRUE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_KEVENT, + "[err]Can't Get Pcie Link State"); + + return UNF_RETURN_ERROR; + } + + return RETURN_OK; +} + +void unf_root_lport_ref_dec(struct unf_lport_s *v_lport) +{ + unsigned long flags = 0; + unsigned long lport_flags = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x2385, UNF_TRUE, v_lport, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%p) port_id(0x%x) reference count is %d", + v_lport, v_lport->port_id, + atomic_read(&v_lport->lport_ref_cnt)); + + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); + spin_lock_irqsave(&v_lport->lport_state_lock, lport_flags); + if (atomic_dec_and_test(&v_lport->lport_ref_cnt)) { + spin_unlock_irqrestore(&v_lport->lport_state_lock, lport_flags); + + list_del(&v_lport->entry_lport); + global_lport_mgr.lport_sum--; + + /* Put L_Port to destroy list for debuging */ + list_add_tail(&v_lport->entry_lport, + &global_lport_mgr.list_destroy_head); + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, + flags); + + ret = unf_schedule_global_event((void *)v_lport, + UNF_GLOBAL_EVENT_ASYN, + unf_lport_destroy); + if (ret != RETURN_OK) + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EVENT, + UNF_CRITICAL, + "[warn]Schedule global event faile. 
remain nodes(0x%x)", + global_event_queue.list_number); + } else { + spin_unlock_irqrestore(&v_lport->lport_state_lock, lport_flags); + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, + flags); + } +} + +void unf_lport_ref_dec_to_destroy(struct unf_lport_s *v_lport) +{ + if (v_lport->root_lport != v_lport) + unf_vport_ref_dec(v_lport); + else + unf_root_lport_ref_dec(v_lport); +} + +void unf_lport_route_work(struct work_struct *v_work) +{ +#define MAX_INTERVAL_TIMES 60 + + struct unf_lport_s *lport = NULL; + int ret = 0; + struct unf_err_code_s fc_err_code; + + UNF_CHECK_VALID(0x2388, UNF_TRUE, v_work, return); + + lport = container_of(v_work, struct unf_lport_s, route_timer_work.work); + if (unlikely(!lport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, + UNF_KEVENT, "[err]LPort is NULL"); + + return; + } + + if (unlikely(lport->b_port_removing == UNF_TRUE)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_KEVENT, + "[warn]LPort(0x%x) route work is closing.", + lport->port_id); + + unf_lport_ref_dec_to_destroy(lport); + + return; + } + + if (unlikely(unf_get_pcie_link_state(lport))) + lport->pcie_link_down_cnt++; + else + lport->pcie_link_down_cnt = 0; + + if (lport->pcie_link_down_cnt >= 3) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_KEVENT, + "[warn]LPort(0x%x) detected pcie linkdown, closing route work", + lport->port_id); + lport->b_pcie_linkdown = UNF_TRUE; + unf_free_lport_all_xchg(lport); + unf_lport_ref_dec_to_destroy(lport); + return; + } + + if (unlikely(UNF_LPORT_CHIP_ERROR(lport))) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_KEVENT, + "[warn]LPort(0x%x) reported chip error, closing route work. ", + lport->port_id); + + unf_lport_ref_dec_to_destroy(lport); + + return; + } + + if (lport->enhanced_features & + UNF_LPORT_ENHANCED_FEATURE_CLOSE_FW_ROUTE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_KEVENT, + "[warn]User close LPort(0x%x) route work. 
", + lport->port_id); + + unf_lport_ref_dec_to_destroy(lport); + + return; + } + if (atomic_read(&lport->err_code_obtain_freq) == 0) { + memset(&fc_err_code, 0, sizeof(struct unf_err_code_s)); + unf_get_error_code_sum(lport, &fc_err_code); + atomic_inc(&lport->err_code_obtain_freq); + } else if (atomic_read(&lport->err_code_obtain_freq) == + MAX_INTERVAL_TIMES) { + atomic_set(&lport->err_code_obtain_freq, 0); + } else { + atomic_inc(&lport->err_code_obtain_freq); + } + /* Scheduling 1 second */ + ret = queue_delayed_work( + unf_work_queue, &lport->route_timer_work, + (unsigned long)msecs_to_jiffies(UNF_LPORT_POLL_TIMER)); + if (ret == 0) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_KEVENT, + "[warn]LPort(0x%x) schedule work unsuccessful.", + lport->port_id); + + unf_lport_ref_dec_to_destroy(lport); + } +} + +int unf_cm_get_port_info(void *argc_in, void *argc_out) +{ + struct unf_lport_s *lport = NULL; + struct unf_get_port_info_argout *port_info = NULL; + + UNF_CHECK_VALID(0x2398, UNF_TRUE, argc_in, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x2398, UNF_TRUE, argc_out, return UNF_RETURN_ERROR); + + lport = (struct unf_lport_s *)argc_in; + port_info = (struct unf_get_port_info_argout *)argc_out; + + if (!lport->low_level_func.port_mgr_op.pfn_ll_port_config_get) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Port(0x%x)'s corresponding function is NULL.", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + if (lport->low_level_func.port_mgr_op.pfn_ll_port_config_get( + lport->fc_port, + UNF_PORT_CFG_GET_PORT_INFO, port_info) != + RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Port(0x%x) get current info failed.", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + return RETURN_OK; +} + +static unsigned int unf_get_lport_current_info(struct unf_lport_s *v_lport) +{ + struct unf_get_port_info_argout port_info = { 0 }; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x2403, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + lport = unf_find_lport_by_port_id(v_lport->port_id); + + if (!lport) + return UNF_RETURN_ERROR; + + ret = (unsigned int)unf_send_event(lport->port_id, UNF_EVENT_SYN, + (void *)lport, + (void *)&port_info, + unf_cm_get_port_info); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "UNF_GetPortCurrentInfo SendEvent(unf_cm_get_port_info) fail."); + + return UNF_RETURN_ERROR; + } + + lport->low_level_func.sfp_speed = port_info.sfp_speed; + + return RETURN_OK; +} + +int unf_set_link_lose_tmo_to_up(struct unf_lport_s *v_lport, + struct unf_flash_link_tmo_s *v_link_tmo) +{ + int ret = UNF_RETURN_ERROR; + struct unf_flash_data_s flash_data; + + if ((!v_lport) || (!v_link_tmo) || + (sizeof(struct unf_flash_data_s) > HIFC_FLASH_DATA_MAX_LEN)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_KEVENT, + "[warn]set link tmo param check fail"); + return ret; + } + memset(&flash_data, 0, sizeof(struct unf_flash_data_s)); + + if (!v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_KEVENT, + "[warn]link tmo fun null"); + return ret; + } + if (v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get( + v_lport->fc_port, + UNF_PORT_CFG_GET_FLASH_DATA_INFO, &flash_data) != + RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_KEVENT, + "[warn]get link tmo to up fail"); + return ret; + } + + memcpy(&flash_data.link_tmo, v_link_tmo, 
HIFC_FLASH_LINK_TMO_MAX_LEN); + + if (!v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_set) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_KEVENT, + "[warn]set link tmo fun null"); + return ret; + } + + if (v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_set( + v_lport->fc_port, UNF_PORT_CFG_SET_FLASH_DATA_INFO, + &flash_data) != + RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_KEVENT, + "[warn]set link tmo to up fail"); + return ret; + } + ret = RETURN_OK; + + return ret; +} + +int unf_set_link_lose_tmo(struct unf_lport_s *v_lport, int time_out) +{ + struct unf_flash_link_tmo_s flash_link_tmo; + int ret = UNF_RETURN_ERROR; + unsigned int link_tmo = (unsigned int)time_out; + + memset(&flash_link_tmo, 0, sizeof(struct unf_flash_link_tmo_s)); + + if (!v_lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, + UNF_KEVENT, "[warn]set link tmo lport null"); + return ret; + } + + /* 1. update gloabl var */ + if ((int)atomic_read(&v_lport->link_lose_tmo) == time_out) + return RETURN_OK; + + atomic_set(&v_lport->link_lose_tmo, time_out); + + flash_link_tmo.writeflag = HIFC_MGMT_TMO_MAGIC_NUM; + flash_link_tmo.link_tmo0 = (unsigned char)link_tmo; + flash_link_tmo.link_tmo1 = (unsigned char)(link_tmo >> 8); + flash_link_tmo.link_tmo2 = (unsigned char)(link_tmo >> 16); + flash_link_tmo.link_tmo3 = (unsigned char)(link_tmo >> 24); + + /* 2. write to up */ + ret = unf_set_link_lose_tmo_to_up(v_lport, &flash_link_tmo); + + return ret; +} + +int unf_set_link_lose_tmo_to_all(int time_out) +{ + int ret = RETURN_OK; + struct list_head list_lport_tmp_head; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + struct unf_lport_s *lport = NULL; + unsigned long flags = 0; + + INIT_LIST_HEAD(&list_lport_tmp_head); + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); + list_for_each_safe(node, next_node, + &global_lport_mgr.list_lport_list_head) { + lport = list_entry(node, struct unf_lport_s, entry_lport); + list_del_init(&lport->entry_lport); + list_add_tail(&lport->entry_lport, &list_lport_tmp_head); + (void)unf_lport_refinc(lport); + } + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags); + + while (!list_empty(&list_lport_tmp_head)) { + node = (&list_lport_tmp_head)->next; + lport = list_entry(node, struct unf_lport_s, entry_lport); + if (lport) + unf_set_link_lose_tmo(lport, time_out); + + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, + flags); + list_del_init(&lport->entry_lport); + list_add_tail(&lport->entry_lport, + &global_lport_mgr.list_lport_list_head); + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, + flags); + + unf_lport_ref_dec_to_destroy(lport); + } + + return ret; +} + +static int unf_cm_adm_show_xchg(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input) +{ + int ret = UNF_RETURN_ERROR; + struct unf_xchg_mgr_s *xchg_mgr = NULL; + struct unf_xchg_s *xchg = NULL; + struct list_head *xchg_node = NULL; + struct list_head *next_xchg_node = NULL; + unsigned long flags = 0; + unsigned int aborted = 0; + unsigned int ini_busy = 0; + unsigned int tgt_busy = 0; + unsigned int delay = 0; + unsigned int free = 0; + unsigned int wait = 0; + unsigned int sfs_free = 0; + unsigned int sfs_busy = 0; + unsigned int i; + struct unf_adm_xchg *buff_out = NULL; + + buff_out = (struct unf_adm_xchg *)v_input->buff_out; + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, *v_input->out_size >= + sizeof(struct unf_adm_xchg), return UNF_RETURN_ERROR); + + for (i = 0; i < 
UNF_EXCHG_MGR_NUM; i++) { + xchg_mgr = unf_get_xchg_mgr_by_lport(v_lport, i); + if (!xchg_mgr) + continue; + + if (!xchg_mgr->hot_pool) + continue; + + /* hot Xchg */ + spin_lock_irqsave(&xchg_mgr->hot_pool->xchg_hot_pool_lock, + flags); + + UNF_TRACE(0x2659, UNF_LOG_NORMAL, UNF_INFO, "ini busy :"); + list_for_each_safe(xchg_node, next_xchg_node, + &xchg_mgr->hot_pool->ini_busylist) { + ini_busy++; + + xchg = list_entry(xchg_node, struct unf_xchg_s, + list_xchg_entry); + UNF_TRACE(0x2660, UNF_LOG_NORMAL, UNF_INFO, + "0x%p--0x%x--0x%x--0x%x--0x%x--0x%x--0x%x--0x%x--0x%x--0x%x--(%llu)", + xchg, + (unsigned int)xchg->hot_pool_tag, + (unsigned int)xchg->xchg_type, + (unsigned int)xchg->ox_id, + (unsigned int)xchg->rx_id, + (unsigned int)xchg->sid, + (unsigned int)xchg->did, + (unsigned int)xchg->seq_id, + (unsigned int)xchg->io_state, + atomic_read(&xchg->ref_cnt), + xchg->alloc_jif); + } + + UNF_TRACE(0x2665, UNF_LOG_NORMAL, UNF_INFO, "SFS Busy:"); + list_for_each_safe(xchg_node, next_xchg_node, + &xchg_mgr->hot_pool->sfs_busylist) { + sfs_busy++; + + xchg = list_entry(xchg_node, struct unf_xchg_s, + list_xchg_entry); + UNF_TRACE(0x2666, UNF_LOG_NORMAL, UNF_INFO, + "0x%p--0x%x--0x%x--0x%x--0x%x--0x%x--0x%x--0x%x--0x%x--0x%x--(%llu)", + xchg, + (unsigned int)xchg->hot_pool_tag, + (unsigned int)xchg->xchg_type, + (unsigned int)xchg->ox_id, + (unsigned int)xchg->rx_id, + (unsigned int)xchg->sid, + (unsigned int)xchg->did, + (unsigned int)xchg->seq_id, + (unsigned int)xchg->io_state, + atomic_read(&xchg->ref_cnt), + xchg->alloc_jif); + } + + spin_unlock_irqrestore(&xchg_mgr->hot_pool->xchg_hot_pool_lock, + flags); + + /* free Xchg */ + spin_lock_irqsave(&xchg_mgr->free_pool.xchg_free_pool_lock, + flags); + + list_for_each_safe(xchg_node, next_xchg_node, + &xchg_mgr->free_pool.list_free_xchg_list) { + free++; + } + + list_for_each_safe(xchg_node, next_xchg_node, + &xchg_mgr->free_pool.list_sfs_xchg_list) { + sfs_free++; + } + spin_unlock_irqrestore(&xchg_mgr->free_pool.xchg_free_pool_lock, + flags); + + ret = RETURN_OK; + } + + buff_out->aborted = aborted; + buff_out->ini_busy = ini_busy; + buff_out->tgt_busy = tgt_busy; + buff_out->delay = delay; + buff_out->free = free; + buff_out->wait = wait; + buff_out->sfs_free = sfs_free; + buff_out->sfs_busy = sfs_busy; + UNF_REFERNCE_VAR(xchg); + return ret; +} + +static int unf_cm_adm_link_time_out_opt(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input) +{ + int ret = RETURN_OK; + int time_out = 0; + struct unf_link_tmo_opt_s *buff_in = NULL; + struct unf_link_tmo_opt_s *buff_out = NULL; + struct unf_admin_msg_head msg_head = { 0 }; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input, + return RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_lport, + return RETURN_ERROR); + + buff_in = (struct unf_link_tmo_opt_s *)(v_input->buff_in); + buff_out = (struct unf_link_tmo_opt_s *)(v_input->buff_out); + + msg_head.status = UNF_ADMIN_MSG_DONE; + msg_head.size = sizeof(struct unf_admin_msg_head); + if (buff_in->link_opt) { + /* set link tmo value */ + time_out = unf_get_link_lose_tmo(v_lport); + /* compatible for PI2 branch tool (not release)not + * include syncAllPort section + */ + if (v_input->in_size > 16) { + if (buff_in->sync_all_port) + /* sync to all other lport */ + unf_set_link_lose_tmo_to_all(buff_in->tmo_value); + else + unf_set_link_lose_tmo(v_lport, + buff_in->tmo_value); + + buff_out->sync_all_port = 1; + } else { + unf_set_link_lose_tmo_to_all(buff_in->tmo_value); + } + + buff_out->link_opt = 1; + + /* return orige 
value */ + buff_out->tmo_value = time_out; + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_KEVENT, + "[info]set fc port(0x%0x)link tmo value(%d -> %d) success .", + v_lport->nport_id, time_out, buff_out->tmo_value); + } else { + /* get link tmo value */ + buff_out->tmo_value = unf_get_link_lose_tmo(v_lport); + buff_out->link_opt = 0; + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR, + "get fc port(0x%0x) link tmo value(%d) success .", + v_lport->nport_id, buff_out->tmo_value); + } + *v_input->out_size = v_input->in_size; + memcpy((void *)buff_out, &msg_head, sizeof(struct unf_admin_msg_head)); + return ret; +} + +static int unf_cm_adm_log_level_opt(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input) +{ + int ret = RETURN_OK; + unsigned int log_level = 0; + unsigned int log_count = 0; + struct unf_log_level_opt_s *buff_in = NULL; + struct unf_log_level_opt_s *buff_out = NULL; + struct unf_admin_msg_head msg_head = { 0 }; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input, + return RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + v_input->in_size >= sizeof(struct unf_log_level_opt_s), + return RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + *v_input->out_size >= + sizeof(struct unf_log_level_opt_s), + return RETURN_ERROR); + + buff_in = (struct unf_log_level_opt_s *)(v_input->buff_in); + buff_out = (struct unf_log_level_opt_s *)(v_input->buff_out); + + msg_head.status = UNF_ADMIN_MSG_DONE; + msg_head.size = sizeof(struct unf_admin_msg_head); + if (buff_in->log_opt) { + /* set log level value */ + log_level = log_print_level; + log_count = log_limted_times; + log_print_level = buff_in->log_level; + log_limted_times = buff_in->log_fre_qunce; + buff_out->log_opt = 1; + /* return orige value */ + + buff_out->log_level = log_level; + buff_out->log_fre_qunce = log_count; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR, + "set fc log level(%u -> %u), frenqunce(%u -> %u)in 2s success .", + log_level, log_print_level, log_count, + log_limted_times); + } else { + /* get link tmo value */ + buff_out->log_level = log_print_level; + buff_out->log_fre_qunce = log_limted_times; + buff_out->log_opt = 0; + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR, + "get fc log level(%u),frenqunce(%u) in 2s success .", + buff_out->log_level, buff_out->log_fre_qunce); + } + *v_input->out_size = sizeof(struct unf_log_level_opt_s); + memcpy((void *)buff_out, &msg_head, sizeof(struct unf_admin_msg_head)); + return ret; +} + +int unf_cm_echo_test(unsigned int v_port_id, unsigned int v_nport_id, + unsigned int *v_link_delay) +{ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + int ret = RETURN_OK; + unsigned int index = 0; + + lport = unf_find_lport_by_port_id(v_port_id); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR, + "fcping request failed [invalid source lport (0x%x)].\n", + v_port_id); + + return UNF_RETURN_ERROR; + } + + rport = unf_get_rport_by_nport_id(lport, v_nport_id); + if ((!rport) || (v_nport_id == UNF_FC_FID_FLOGI)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR, + "fcping request failed [invalid destination rport(0x%x)].\n", + v_nport_id); + + return UNF_RETURN_ERROR; + } + + for (index = 0; index < UNF_ECHO_SEND_MAX_TIMES; index++) { + ret = (int)unf_send_echo(lport, rport, v_link_delay); + if (ret != RETURN_OK) { + *v_link_delay = 0xffffffff; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, + UNF_MAJOR, + "fcping request failed 
[lport(0x%x)-> rport(0x%x)].\n", + v_port_id, v_nport_id); + + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, + UNF_MAJOR, + "fcping request succeed within %u us [lport(0x%x)->rport(0x%x)].\n", + *(unsigned int *)v_link_delay, v_port_id, + v_nport_id); + } + + msleep(1000); + } + + return ret; +} + +static int unf_cm_link_delay_get(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input) +{ + int ret = UNF_RETURN_ERROR; + unsigned int link_delay = 0xffffffff; + unsigned int nport_id = 0xffffff; + unsigned int port_id = 0; + struct unf_adm_cmd *buff_in = NULL; + struct unf_adm_cmd *buff_out = NULL; + struct unf_admin_msg_head msg_head = { 0 }; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, *v_input->out_size >= + sizeof(struct unf_adm_cmd), return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input->in_size >= + sizeof(struct unf_adm_cmd), return UNF_RETURN_ERROR); + + buff_in = (struct unf_adm_cmd *)(v_input->buff_in); + buff_out = (struct unf_adm_cmd *)(v_input->buff_out); + port_id = v_lport->port_id; + nport_id = buff_in->arg[0]; + + msg_head.status = UNF_ADMIN_MSG_DONE; + + ret = unf_cm_echo_test(port_id, nport_id, &link_delay); + if ((ret == RETURN_OK) && (link_delay != 0xffffffff)) { + buff_out->arg[0] = link_delay; + msg_head.size = sizeof(struct unf_admin_msg_head) + + sizeof(unsigned int) * 1; + } else { + msg_head.status = UNF_ADMIN_MSG_FAILED; + msg_head.size = sizeof(struct unf_admin_msg_head); + } + + *v_input->out_size = sizeof(struct unf_adm_cmd); + memcpy((void *)buff_out, &msg_head, sizeof(struct unf_admin_msg_head)); + + return ret; +} + +static unsigned int unf_port_release_rport_index(struct unf_lport_s *v_lport, + void *v_input) +{ + unsigned int index = INVALID_VALUE32; + unsigned int *rport_index = NULL; + unsigned long flag = 0; + struct unf_rport_pool_s *rport_pool = NULL; + + UNF_CHECK_VALID(0x2370, UNF_FALSE, v_lport, return UNF_RETURN_ERROR); + + if (v_input) { + rport_index = (unsigned int *)v_input; + index = *rport_index; + if (index < v_lport->low_level_func.support_max_rport) { + rport_pool = &((struct unf_lport_s *)v_lport->root_lport)->rport_pool; + spin_lock_irqsave(&rport_pool->rport_free_pool_lock, + flag); + if (test_bit((int)index, rport_pool->pul_rpi_bitmap)) + clear_bit((int)index, + rport_pool->pul_rpi_bitmap); + else + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_ERR, + "[warn]Port(0x%x) try to release a free rport index(0x%x)", + v_lport->port_id, index); + + spin_unlock_irqrestore( + &rport_pool->rport_free_pool_lock, + flag); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_ERR, + "[warn]Port(0x%x) try to release a not exist rport index(0x%x)", + v_lport->port_id, index); + } + } + + return RETURN_OK; +} + +void *unf_lookup_lport_by_nport_id(void *v_lport, unsigned int v_nport_id) +{ + struct unf_lport_s *lport = NULL; + struct unf_vport_pool_s *vport_pool = NULL; + struct unf_lport_s *vport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x1978, UNF_TRUE, v_lport, return NULL); + + lport = (struct unf_lport_s *)v_lport; + lport = lport->root_lport; + vport_pool = lport->vport_pool; + + if (v_nport_id == lport->nport_id) + return lport; + + if (unlikely(!vport_pool)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) vport pool is NULL", + lport->port_id); + + return NULL; + } + 
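+	/* Descriptive note on the lookup below: both vport lists of the root
+	 * L_Port (list_vports_head and list_intergrad_vports) are walked
+	 * under the vport pool lock, and the first vport whose N_Port ID
+	 * matches is returned; NULL is returned if no vport matches.
+	 */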
+ spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); + list_for_each_safe(node, next_node, &lport->list_vports_head) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + if (vport->nport_id == v_nport_id) { + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, + flag); + return vport; + } + } + + list_for_each_safe(node, next_node, &lport->list_intergrad_vports) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + if (vport->nport_id == v_nport_id) { + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, + flag); + return vport; + } + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "Port(0x%x) has no vport Nport ID(0x%x)", + lport->port_id, v_nport_id); + return NULL; +} + +static int unf_get_port_info(struct unf_lport_s *v_lport, + struct unf_lport_info *v_port_info) +{ + unsigned int act_speed = INVALID_VALUE32; + unsigned int cfg_speed = INVALID_VALUE32; + unsigned int cfg_topo = INVALID_VALUE32; + enum unf_act_topo_e act_topo = UNF_ACT_TOP_UNKNOWN; + struct unf_err_code_s fc_err_code; + unsigned int cfg_led_mode = INVALID_VALUE32; + struct unf_vport_pool_s *vport_pool = NULL; + struct unf_lport_s *vport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x2205, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x2206, UNF_TRUE, v_port_info, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x2207, UNF_TRUE, v_lport->fc_port, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID( + 0x2208, UNF_TRUE, + v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get, + return UNF_RETURN_ERROR); + UNF_REFERNCE_VAR(cfg_speed); + UNF_REFERNCE_VAR(act_topo); + + memset(&fc_err_code, 0, sizeof(fc_err_code)); + + /* get port speed */ + cfg_speed = v_lport->low_level_func.lport_cfg_items.port_speed; + + if (v_lport->link_up == UNF_PORT_LINK_UP) + (void)v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get( + v_lport->fc_port, + UNF_PORT_CFG_GET_SPEED_ACT, (void *)&act_speed); + else + act_speed = UNF_PORT_SPEED_UNKNOWN; + + (void)v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get( + v_lport->fc_port, + UNF_PORT_CFG_GET_SPEED_CFG, (void *)&cfg_speed); + + if (v_lport->link_up == UNF_PORT_LINK_UP) + (void)v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get( + v_lport->fc_port, + UNF_PORT_CFG_GET_TOPO_ACT, (void *)&act_topo); + else + act_topo = UNF_ACT_TOP_UNKNOWN; + + (void)v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get( + v_lport->fc_port, + UNF_PORT_CFG_GET_TOPO_CFG, (void *)&cfg_topo); + + (void)v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get( + v_lport->fc_port, + UNF_PORT_CFG_GET_LED_STATE, (void *)&cfg_led_mode); + + v_port_info->port_id = v_lport->port_id; + v_port_info->options = v_lport->options; + v_port_info->b_start_work = global_lport_mgr.b_start_work; + v_port_info->phy_link = UNF_PORT_LINK_UP; + v_port_info->link_up = v_lport->link_up; + v_port_info->act_speed = act_speed; + v_port_info->cfg_speed = cfg_speed; + v_port_info->port_name = v_lport->port_name; + v_port_info->tape_support = + v_lport->low_level_func.lport_cfg_items.tape_support; + v_port_info->msi = 0; + v_port_info->ini_io_retry_timeout = 0; + v_port_info->support_max_npiv_num = + v_lport->low_level_func.support_max_npiv_num; + v_port_info->act_topo = act_topo; + v_port_info->port_topology = + v_lport->low_level_func.lport_cfg_items.port_topology; + v_port_info->fc_ser_max_speed = + 
v_lport->low_level_func.fc_ser_max_speed; + + if (unf_get_error_code_sum(v_lport, &fc_err_code) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) get error code failed", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + v_port_info->loss_of_signal_count = fc_err_code.loss_of_signal_count; + v_port_info->bad_rx_char_count = fc_err_code.bad_rx_char_count; + v_port_info->loss_of_sync_count = fc_err_code.loss_of_sync_count; + v_port_info->link_fail_count = fc_err_code.link_fail_count; + v_port_info->rx_eo_fa_count = fc_err_code.rx_eo_fa_count; + v_port_info->dis_frame_count = fc_err_code.dis_frame_count; + v_port_info->bad_crc_count = fc_err_code.bad_crc_count; + v_port_info->proto_error_count = fc_err_code.proto_error_count; + v_port_info->chip_type = v_lport->low_level_func.chip_info.chip_type; + v_port_info->cfg_led_mode = cfg_led_mode; + + v_port_info->vport_num = 0; + + vport_pool = v_lport->vport_pool; + if (unlikely(!vport_pool)) + return RETURN_OK; + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); + list_for_each_safe(node, next_node, &v_lport->list_vports_head) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + + v_port_info->vport_id[v_port_info->vport_num] = vport->port_id; + + v_port_info->vport_num = v_port_info->vport_num + 1; + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); + return RETURN_OK; +} + +static int unf_get_vport_info(struct unf_lport_s *v_lport, + unsigned int v_vport_id, + struct unf_lport_info *v_port_info) +{ + unsigned char vport_index = INVALID_VALUE8; + struct unf_lport_s *vport = NULL; + + UNF_CHECK_VALID(0x2203, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x2203, UNF_TRUE, v_port_info, return UNF_RETURN_ERROR); + + vport_index = (v_vport_id & PORTID_VPINDEX_MASK) >> PORTID_VPINDEX_SHIT; + if (unlikely(vport_index == 0)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[err]VPortId(0x%x) is not vport", v_vport_id); + + return UNF_RETURN_ERROR; + } + + vport = unf_cm_lookup_vport_by_vp_index(v_lport, vport_index); + if (unlikely(!vport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[err]VPortId(0x%x) can not be found", + v_vport_id); + + return UNF_RETURN_ERROR; + } + + v_port_info->port_id = vport->port_id; + v_port_info->port_name = vport->port_name; + v_port_info->nport_id = vport->nport_id; + v_port_info->options = 0; + + return RETURN_OK; +} + +static int unf_get_all_port_info(void *v_arg_in, void *v_arg_out) +{ + struct unf_lport_s *lport = NULL; + struct unf_get_allinfo_argout *arg_in = NULL; + unsigned int current_len = 0; + struct unf_lport_info *cur_lport_info = NULL; + struct unf_admin_msg_head msg_head = { 0 }; + int ret = UNF_RETURN_ERROR; + unsigned int out_buf_len = 0; + char *out_buf = NULL; + struct hifc_adm_cmd_s *buff_in = NULL; + + UNF_CHECK_VALID(0x2203, UNF_TRUE, v_arg_in, return UNF_RETURN_ERROR); + UNF_REFERNCE_VAR(v_arg_out); + + arg_in = (struct unf_get_allinfo_argout *)v_arg_in; + out_buf = (char *)arg_in->out_buf; + buff_in = (struct hifc_adm_cmd_s *)arg_in->in_buf; + lport = (struct unf_lport_s *)arg_in->lport; + + UNF_CHECK_VALID(0x2203, UNF_TRUE, out_buf, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x2203, UNF_TRUE, buff_in, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x2203, UNF_TRUE, lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, arg_in->in_size >= + sizeof(struct hifc_adm_cmd_s), return UNF_RETURN_ERROR); + + cur_lport_info = vmalloc(sizeof(struct 
unf_lport_info)); + if (!cur_lport_info) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) malloc memory fail", lport->port_id); + ((struct unf_admin_msg_head *)out_buf)->status = + UNF_ADMIN_MSG_FAILED; + return ret; + } + + memset(cur_lport_info, 0, sizeof(struct unf_lport_info)); + out_buf_len = arg_in->in_size; + msg_head.status = UNF_ADMIN_MSG_DONE; + *arg_in->out_size = out_buf_len; + + /* Storage info */ + current_len += sizeof(struct unf_admin_msg_head); + + if (lport->b_port_removing != UNF_TRUE) { + /* Cmd[3] is Vportid */ + if (buff_in->cmd[3] != 0) { + ret = unf_get_vport_info(lport, buff_in->cmd[3], + cur_lport_info); + } else { + ret = unf_get_port_info(lport, cur_lport_info); + } + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, + UNF_INFO, + "[err]Port(0x%x) get port information error", + lport->port_id); + + msg_head.status = UNF_ADMIN_MSG_FAILED; + msg_head.size = current_len; + memcpy(out_buf, &msg_head, + sizeof(struct unf_admin_msg_head)); + vfree(cur_lport_info); + return ret; + } + + if (out_buf_len < current_len + sizeof(struct unf_lport_info)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, + UNF_ERR, + "[warn]Allocated buff size (%u < %lu) is not enough", + out_buf_len, + current_len + sizeof(struct unf_lport_info)); + + /* Compatible for vport: return Lport info + * if tools version is not support npiv + */ + memcpy(out_buf + current_len, cur_lport_info, + out_buf_len - current_len); + + current_len = out_buf_len; + + } else { + memcpy(out_buf + current_len, cur_lport_info, + sizeof(struct unf_lport_info)); + current_len += sizeof(struct unf_lport_info); + } + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[warn]Port(0x%x) is removing. 
Ref count 0x%x", + lport->port_id, atomic_read(&lport->lport_ref_cnt)); + + msg_head.status = UNF_ADMIN_MSG_FAILED; + } + + msg_head.size = current_len; + memcpy(out_buf, &msg_head, sizeof(struct unf_admin_msg_head)); + vfree(cur_lport_info); + return ret; +} + +static int unf_cm_get_all_port_info(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input) +{ + struct unf_get_allinfo_argout out = { 0 }; + int ret = UNF_RETURN_ERROR; + + out.out_buf = v_input->buff_out; + out.in_buf = v_input->buff_in; + out.out_size = v_input->out_size; + out.in_size = v_input->in_size; + out.lport = v_lport; + + ret = (int)unf_schedule_global_event((void *)&out, + UNF_GLOBAL_EVENT_SYN, + unf_get_all_port_info); + + return ret; +} + +static int unf_cm_port_set(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input) +{ + int ret = UNF_RETURN_ERROR; + unsigned int mode = 0; /* 1:portreset 2:sfp on/off */ + int turn_on = 0; /* 0:sfp off 1:sfp on */ + unsigned int port_id = 0; + void *out_buf = NULL; + struct unf_adm_cmd *buff_in = NULL; + struct unf_admin_msg_head msg_head = { 0 }; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input->in_size >= + sizeof(struct unf_adm_cmd), return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, *v_input->out_size >= + sizeof(struct unf_adm_cmd), return UNF_RETURN_ERROR); + out_buf = v_input->buff_out; + buff_in = v_input->buff_in; + mode = buff_in->arg[0]; + port_id = v_lport->port_id; + + msg_head.status = UNF_ADMIN_MSG_DONE; + + if (mode == 1) { + ret = unf_cm_reset_port(port_id); + + if (ret != RETURN_OK) + msg_head.status = UNF_ADMIN_MSG_FAILED; + + } else if (mode == 2) { + turn_on = (int)buff_in->arg[1]; + + if ((turn_on == 0) || (turn_on == 1)) { + ret = unf_cm_sfp_switch(port_id, turn_on); + if (ret != RETURN_OK) + msg_head.status = UNF_ADMIN_MSG_FAILED; + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[err]Switch sfp failed. Parameter(0x%x) error", + turn_on); + msg_head.status = UNF_ADMIN_MSG_FAILED; + } + } + + msg_head.size = sizeof(struct unf_admin_msg_head); + *v_input->out_size = sizeof(struct unf_adm_cmd); + memcpy(out_buf, &msg_head, sizeof(struct unf_admin_msg_head)); + + return ret; +} + +static int unf_cm_topo_set(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input) +{ + int ret = UNF_RETURN_ERROR; + unsigned int topo = 0; /* topology set */ + unsigned int port_id = 0; + void *out_buf = NULL; + struct unf_adm_cmd *buff_in = NULL; + struct unf_admin_msg_head msg_head = { 0 }; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input->in_size >= + sizeof(struct unf_adm_cmd), return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, *v_input->out_size >= + sizeof(struct unf_adm_cmd), return UNF_RETURN_ERROR); + out_buf = v_input->buff_out; + buff_in = v_input->buff_in; + topo = buff_in->arg[0]; + port_id = v_lport->port_id; + + msg_head.status = UNF_ADMIN_MSG_DONE; + + if ((topo == UNF_TOP_AUTO_MASK) || (topo == UNF_TOP_LOOP_MASK) || + (topo == UNF_TOP_P2P_MASK)) { + ret = unf_cm_set_port_topo(port_id, topo); + if (ret != RETURN_OK) + msg_head.status = UNF_ADMIN_MSG_FAILED; + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Set topo failed. 
Parameter(0x%x) error", topo); + msg_head.status = UNF_ADMIN_MSG_FAILED; + } + + msg_head.size = sizeof(struct unf_admin_msg_head); + *v_input->out_size = sizeof(struct unf_adm_cmd); + memcpy(out_buf, &msg_head, sizeof(struct unf_admin_msg_head)); + + return ret; +} + +static int unf_cm_port_speed_set(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input) +{ + int ret = UNF_RETURN_ERROR; + unsigned int port_speed = 0; + unsigned int port_id = 0; + void *out_buf = NULL; + struct unf_adm_cmd *buff_in = NULL; + struct unf_admin_msg_head msg_head = { 0 }; + struct unf_lport_s *lport = NULL; + int check_speed_flag = UNF_TRUE; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_lport, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + v_input->in_size >= sizeof(struct unf_adm_cmd), + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + *v_input->out_size >= sizeof(struct unf_adm_cmd), + return UNF_RETURN_ERROR); + lport = v_lport; + out_buf = v_input->buff_out; + buff_in = v_input->buff_in; + port_speed = buff_in->arg[0]; + port_id = v_lport->port_id; + + msg_head.status = UNF_ADMIN_MSG_DONE; + + /* get and check sfp speed */ + if (unf_get_lport_current_info(lport) != RETURN_OK) { + msg_head.status = UNF_ADMIN_MSG_FAILED; + lport->low_level_func.sfp_speed = UNF_PORT_SFP_SPEED_ERR; + } + if (UNF_CHECK_CONFIG_SPEED_BY_SFSSPEED(lport->low_level_func.sfp_speed, + port_speed)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Set port speed failed. Speed (0x%x) is greater than SfpSpeed (0x%x)", + port_speed, lport->low_level_func.sfp_speed); + msg_head.status = UNF_ADMIN_MSG_FAILED; + check_speed_flag = UNF_FALSE; + } else { + if (lport->low_level_func.fc_ser_max_speed == + UNF_PORT_SPEED_32_G) { + check_speed_flag = + (port_speed == UNF_PORT_SPEED_AUTO) || + (port_speed == UNF_PORT_SPEED_8_G) || + (port_speed == UNF_PORT_SPEED_16_G) || + (port_speed == UNF_PORT_SPEED_32_G); + } else if (lport->low_level_func.fc_ser_max_speed == + UNF_PORT_SPEED_16_G) { + check_speed_flag = + (port_speed == UNF_PORT_SPEED_AUTO) || + (port_speed == UNF_PORT_SPEED_4_G) || + (port_speed == UNF_PORT_SPEED_8_G) || + (port_speed == UNF_PORT_SPEED_16_G); + } else if (lport->low_level_func.fc_ser_max_speed == + UNF_PORT_SPEED_8_G) { + check_speed_flag = + (port_speed == UNF_PORT_SPEED_AUTO) || + (port_speed == UNF_PORT_SPEED_2_G) || + (port_speed == UNF_PORT_SPEED_4_G) || + (port_speed == UNF_PORT_SPEED_8_G); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[err]Board maxspeed is unknown"); + msg_head.status = UNF_ADMIN_MSG_FAILED; + check_speed_flag = UNF_FALSE; + } + } + + if (check_speed_flag) { + ret = unf_cm_set_port_speed(port_id, &port_speed); + if (ret != RETURN_OK) + msg_head.status = UNF_ADMIN_MSG_FAILED; + } + + msg_head.size = sizeof(struct unf_admin_msg_head); + *v_input->out_size = sizeof(struct unf_adm_cmd); + memcpy(out_buf, &msg_head, sizeof(struct unf_admin_msg_head)); + + return ret; +} + +static int unf_cm_set_vport(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct unf_lport_s *lport = NULL; + unsigned int mode = 0; + unsigned int index = 0; + unsigned int high32 = 0x2000286e; + unsigned int low32 = 0; + unsigned long long port_name = 0; + unsigned int port_id = 0; + + void *out_buf = NULL; + struct unf_adm_cmd *buff_in = NULL; + struct unf_admin_msg_head msg_head 
= { 0 }; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + v_input->in_size >= sizeof(struct unf_adm_cmd), + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + *v_input->out_size >= sizeof(struct unf_adm_cmd), + return UNF_RETURN_ERROR); + out_buf = v_input->buff_out; + buff_in = v_input->buff_in; + port_id = v_lport->port_id; + + msg_head.status = UNF_ADMIN_MSG_DONE; + + mode = buff_in->arg[0]; + + switch (mode) { + case 1: + /* create vport with wwpn */ + low32 = buff_in->arg[1]; + port_name = ((unsigned long)high32 << 32) | low32; + + //lint -fallthrough + case 3: + /* create vport and autogeneration wwpn */ + ret = unf_npiv_conf(port_id, port_name); + if (ret != RETURN_OK) + msg_head.status = UNF_ADMIN_MSG_FAILED; + msleep(2000); + break; + + case 2: + /* delete vport by vport index */ + index = buff_in->arg[2]; + ret = unf_delete_vport(port_id, index); + if (ret != RETURN_OK) + msg_head.status = UNF_ADMIN_MSG_FAILED; + break; + + case 4: + /* delete all vport on Lport */ + lport = unf_find_lport_by_port_id(port_id); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, + UNF_ERR, + "[err]Port(0x%x) can't find", port_id); + msg_head.status = UNF_ADMIN_MSG_FAILED; + } else { + unf_destroy_all_vports(lport); + ret = RETURN_OK; + } + break; + + default: + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[err]Mode is unknown"); + msg_head.status = UNF_ADMIN_MSG_FAILED; + break; + } + + msg_head.size = sizeof(struct unf_admin_msg_head); + *v_input->out_size = sizeof(struct unf_adm_cmd); + memcpy(out_buf, &msg_head, sizeof(struct unf_admin_msg_head)); + + return (int)ret; +} + +static int unf_cm_port_info_get(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input) +{ + int ret = UNF_RETURN_ERROR; + unsigned int topo_cfg = 0; + enum unf_act_topo_e topo = UNF_ACT_TOP_UNKNOWN; + unsigned int port_speed = 0; + unsigned int port_id = 0; + struct unf_adm_cmd *buff_out = NULL; + struct unf_admin_msg_head msg_head = { 0 }; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + *v_input->out_size >= sizeof(struct unf_adm_cmd), + return UNF_RETURN_ERROR); + lport = v_lport; + port_id = v_lport->port_id; + buff_out = (struct unf_adm_cmd *)v_input->buff_out; + msg_head.status = UNF_ADMIN_MSG_DONE; + + ret = unf_cm_get_port_topo(port_id, &topo_cfg, &topo); + if (ret == RETURN_OK) { + ret = unf_cm_get_port_speed(port_id, &port_speed); + if (ret == RETURN_OK) { + buff_out->arg[0] = lport->port_id; + buff_out->arg[1] = topo_cfg; + buff_out->arg[2] = topo; + buff_out->arg[3] = port_speed; + buff_out->arg[4] = lport->link_up; + + msg_head.size = sizeof(struct unf_admin_msg_head) + + sizeof(unsigned int) * 5; + } else { + msg_head.status = UNF_ADMIN_MSG_FAILED; + msg_head.size = sizeof(struct unf_admin_msg_head); + } + } else { + msg_head.status = UNF_ADMIN_MSG_FAILED; + msg_head.size = sizeof(struct unf_admin_msg_head); + } + + *v_input->out_size = sizeof(struct unf_adm_cmd); + memcpy(buff_out, &msg_head, sizeof(struct unf_admin_msg_head)); + + return ret; +} + +static int unf_get_port_sfp_info(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input) +{ +#define MIN_SFPINFO_LEN 512 + union unf_sfp_eeprome_info *sfp_info = NULL; + int ret = UNF_RETURN_ERROR; + unsigned int status = 0; + unsigned int sfp_type = 0; + unsigned int port_id = 0; + char *buff_out = 
NULL; + struct unf_admin_msg_head msg_head = { 0 }; + + UNF_CHECK_VALID(0x2203, UNF_TRUE, v_input, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + *v_input->out_size >= MIN_SFPINFO_LEN, + return UNF_RETURN_ERROR); + buff_out = v_input->buff_out; + port_id = v_lport->port_id; + + *v_input->out_size = MIN_SFPINFO_LEN; + msg_head.status = UNF_ADMIN_MSG_DONE; + + sfp_info = vmalloc(sizeof(union unf_sfp_eeprome_info)); + if (!sfp_info) + return UNF_RETURN_ERROR; + + memset(sfp_info, 0, sizeof(union unf_sfp_eeprome_info)); + + ret = unf_cm_get_sfp_info(port_id, &status, sfp_info, &sfp_type); + if (ret == UNF_RETURN_ERROR || (status != DRV_CABLE_CONNECTOR_OPTICAL)) + msg_head.status = UNF_ADMIN_MSG_FAILED; + + msg_head.size = sizeof(struct unf_admin_msg_head); + memcpy(buff_out, &msg_head, sizeof(struct unf_admin_msg_head)); + memcpy((buff_out + msg_head.size), + &sfp_info->sfp_info, sizeof(struct unf_sfp_info_s)); + + vfree(sfp_info); + + return ret; +} + +static int unf_cm_clear_error_code_sum(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input) +{ + int ret = RETURN_OK; + void *out_buf = NULL; + unsigned int port_id = 0; + struct unf_admin_msg_head msg_head = { 0 }; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + *v_input->out_size >= sizeof(struct unf_adm_cmd), + return UNF_RETURN_ERROR); + out_buf = v_input->buff_out; + port_id = v_lport->port_id; + msg_head.status = UNF_ADMIN_MSG_DONE; + + ret = unf_cm_clear_port_error_code_sum(port_id); + if (ret != RETURN_OK) + msg_head.status = UNF_ADMIN_MSG_FAILED; + + msg_head.size = sizeof(struct unf_admin_msg_head); + *v_input->out_size = sizeof(struct unf_adm_cmd); + memcpy(out_buf, &msg_head, sizeof(struct unf_admin_msg_head)); + return ret; +} + +static int unf_cm_bbscn_set(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input) +{ + int ret = UNF_RETURN_ERROR; + unsigned int bbscn_val = 0; + unsigned int port_id = 0; + void *out_buf = NULL; + struct unf_adm_cmd *buff_in = NULL; + struct unf_admin_msg_head msg_head = { 0 }; + + UNF_CHECK_VALID(INVALID_VALUE32, + UNF_TRUE, v_input, return UNF_RETURN_ERROR); + out_buf = v_input->buff_out; + buff_in = v_input->buff_in; + port_id = v_lport->port_id; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + out_buf, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + buff_in, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + v_input->in_size >= sizeof(struct unf_adm_cmd), + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + *v_input->out_size >= sizeof(struct unf_adm_cmd), + return UNF_RETURN_ERROR); + bbscn_val = buff_in->arg[1]; + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]BBSCN value (0x%x)", bbscn_val); + msg_head.status = UNF_ADMIN_MSG_DONE; + if (bbscn_val <= UNF_MAX_BBSCN_VALUE) { + ret = unf_cm_set_port_bbscn(port_id, bbscn_val); + if (ret != RETURN_OK) + msg_head.status = UNF_ADMIN_MSG_FAILED; + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]BBSCN value is invalid(0x%x)", bbscn_val); + msg_head.status = UNF_ADMIN_MSG_FAILED; + } + + msg_head.size = sizeof(struct unf_admin_msg_head); + *v_input->out_size = sizeof(struct unf_adm_cmd); + memcpy(out_buf, &msg_head, sizeof(struct unf_admin_msg_head)); + + return ret; +} + +static void unf_fc_host_counter(struct unf_lport_s *v_lport, + struct hifc_adm_dfx_cmd_s *v_buff_out) +{ + unsigned int scsi_id = 0; + 
unsigned int index = 0; + struct unf_rport_scsi_id_image_s *scsi_image_table = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_buff_out, return); + + scsi_image_table = &v_lport->rport_scsi_table; + v_buff_out->unresult.host_cnt.host_num = + v_lport->host_info.p_scsi_host->host_no; + v_buff_out->unresult.host_cnt.port_id = v_lport->port_id; + v_buff_out->unresult.host_cnt.scsi_session_add_success = + atomic_read(&v_lport->scsi_session_add_success); + v_buff_out->unresult.host_cnt.scsi_session_add_failed = + atomic_read(&v_lport->scsi_session_add_failed); + v_buff_out->unresult.host_cnt.scsi_session_del_success = + atomic_read(&v_lport->scsi_session_del_success); + v_buff_out->unresult.host_cnt.scsi_session_del_failed = + atomic_read(&v_lport->scsi_session_del_failed); + v_buff_out->unresult.host_cnt.device_alloc = + atomic_read(&v_lport->device_alloc); + v_buff_out->unresult.host_cnt.device_destroy = + atomic_read(&v_lport->device_destroy); + v_buff_out->unresult.host_cnt.session_loss_tmo = + atomic_read(&v_lport->session_loss_tmo); + v_buff_out->unresult.host_cnt.alloc_scsi_id = + atomic_read(&v_lport->alloc_scsi_id); + v_buff_out->unresult.host_cnt.reuse_scsi_id = + atomic_read(&v_lport->reuse_scsi_id); + v_buff_out->unresult.host_cnt.resume_scsi_id = + atomic_read(&v_lport->resume_scsi_id); + v_buff_out->unresult.host_cnt.add_start_work_failed = + atomic_read(&v_lport->add_start_work_failed); + v_buff_out->unresult.host_cnt.add_closing_work_failed = + atomic_read(&v_lport->add_closing_work_failed); + + for (scsi_id = 0; scsi_id < UNF_MAX_SCSI_ID / 2; scsi_id++) { + index = scsi_id * 2; + v_buff_out->unresult.host_cnt.session_state[scsi_id].session1 = + (unsigned char)atomic_read(&scsi_image_table->wwn_rport_info_table[index].en_scsi_state); + + index = scsi_id * 2 + 1; + v_buff_out->unresult.host_cnt.session_state[scsi_id].session2 = + (unsigned char)atomic_read(&scsi_image_table->wwn_rport_info_table[index].en_scsi_state); + } + + for (scsi_id = 0; scsi_id < UNF_MAX_SCSI_ID; scsi_id++) { + if (!scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter) + continue; + v_buff_out->unresult.host_cnt.abort_io += + atomic_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->error_handle[UNF_SCSI_ABORT_IO_TYPE]); + v_buff_out->unresult.host_cnt.device_reset += + atomic_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->error_handle[UNF_SCSI_DEVICE_RESET_TYPE]); + v_buff_out->unresult.host_cnt.target_reset += + atomic_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->error_handle[UNF_SCSI_TARGET_RESET_TYPE]); + v_buff_out->unresult.host_cnt.bus_reset += + atomic_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->error_handle[UNF_SCSI_BUS_RESET_TYPE]); + v_buff_out->unresult.host_cnt.virtual_reset += + atomic_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->error_handle[UNF_SCSI_VIRTUAL_RESET_TYPE]); + v_buff_out->unresult.host_cnt.abort_io_result += + atomic_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->error_handle_result[UNF_SCSI_ABORT_IO_TYPE]); + v_buff_out->unresult.host_cnt.device_reset_result += + atomic_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->error_handle_result[UNF_SCSI_DEVICE_RESET_TYPE]); + v_buff_out->unresult.host_cnt.target_reset_result += + atomic_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->error_handle_result[UNF_SCSI_TARGET_RESET_TYPE]); + 
v_buff_out->unresult.host_cnt.bus_reset_result += + atomic_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->error_handle_result[UNF_SCSI_BUS_RESET_TYPE]); + v_buff_out->unresult.host_cnt.virtual_reset_result += + atomic_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->error_handle_result[UNF_SCSI_VIRTUAL_RESET_TYPE]); + } +} + +static void unf_fc_session_counter(struct unf_lport_s *v_lport, + unsigned int scsi_id, + struct hifc_adm_dfx_cmd_s *v_buff_out) +{ + struct unf_wwpn_rport_info_s *rport_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_buff_out, return); + + rport_info = &v_lport->rport_scsi_table.wwn_rport_info_table[scsi_id]; + v_buff_out->unresult.session_cnt.port_id = v_lport->port_id; + v_buff_out->unresult.session_cnt.host_id = + v_lport->host_info.p_scsi_host->host_no; + + if (rport_info->dfx_counter) { + v_buff_out->unresult.session_cnt.target_busy = + atomic64_read(&rport_info->dfx_counter->target_busy); + v_buff_out->unresult.session_cnt.host_busy = + atomic64_read(&rport_info->dfx_counter->host_busy); + v_buff_out->unresult.session_cnt.abort_io = + atomic_read(&rport_info->dfx_counter->error_handle[UNF_SCSI_ABORT_IO_TYPE]); + v_buff_out->unresult.session_cnt.device_reset = + atomic_read(&rport_info->dfx_counter->error_handle[UNF_SCSI_DEVICE_RESET_TYPE]); + v_buff_out->unresult.session_cnt.target_reset = + atomic_read(&rport_info->dfx_counter->error_handle[UNF_SCSI_TARGET_RESET_TYPE]); + v_buff_out->unresult.session_cnt.bus_reset = + atomic_read(&rport_info->dfx_counter->error_handle[UNF_SCSI_BUS_RESET_TYPE]); + v_buff_out->unresult.session_cnt.virtual_reset = + atomic_read(&rport_info->dfx_counter->error_handle[UNF_SCSI_VIRTUAL_RESET_TYPE]); + + v_buff_out->unresult.session_cnt.abort_io_result = + atomic_read(&rport_info->dfx_counter->error_handle_result[UNF_SCSI_ABORT_IO_TYPE]); + v_buff_out->unresult.session_cnt.device_reset_result = + atomic_read(&rport_info->dfx_counter->error_handle_result[UNF_SCSI_DEVICE_RESET_TYPE]); + v_buff_out->unresult.session_cnt.target_reset_result = + atomic_read(&rport_info->dfx_counter->error_handle_result[UNF_SCSI_TARGET_RESET_TYPE]); + v_buff_out->unresult.session_cnt.bus_reset_result = + atomic_read(&rport_info->dfx_counter->error_handle_result[UNF_SCSI_BUS_RESET_TYPE]); + v_buff_out->unresult.session_cnt.virtual_reset_result = + atomic_read(&rport_info->dfx_counter->error_handle_result[UNF_SCSI_VIRTUAL_RESET_TYPE]); + + v_buff_out->unresult.session_cnt.device_alloc = + atomic_read(&rport_info->dfx_counter->device_alloc); + v_buff_out->unresult.session_cnt.device_destroy = + atomic_read(&rport_info->dfx_counter->device_destroy); + } + + v_buff_out->unresult.session_cnt.target_id = rport_info->target_id; + + if ((rport_info->wwpn != INVALID_WWPN) && (rport_info->rport)) { + v_buff_out->unresult.session_cnt.remote_port_wwpn = + rport_info->wwpn; + v_buff_out->unresult.session_cnt.remote_port_nportid = + rport_info->rport->nport_id; + v_buff_out->unresult.session_cnt.scsi_state = + atomic_read(&rport_info->en_scsi_state); + v_buff_out->unresult.session_cnt.remote_port_state = + rport_info->rport->rp_state; + v_buff_out->unresult.session_cnt.remote_port_scsiid = + rport_info->rport->scsi_id; + v_buff_out->unresult.session_cnt.remote_port_index = + rport_info->rport->rport_index; + + if (rport_info->rport->lport) { + v_buff_out->unresult.session_cnt.local_port_wwpn = + rport_info->rport->lport->port_name; + 
v_buff_out->unresult.session_cnt.local_port_nportid = + rport_info->rport->local_nport_id; + v_buff_out->unresult.session_cnt.local_port_ini_state = + rport_info->rport->lport_ini_state; + v_buff_out->unresult.session_cnt.local_port_state = + rport_info->rport->lport->en_states; + } + } +} + +static int unf_fc_session_scsi_cmd_in( + struct unf_hinicam_pkg *v_input, + struct unf_rport_scsi_id_image_s *scsi_image_table) +{ + unsigned int scsi_id = 0; + unsigned int scsi_cmd_type = 0; + int ret = RETURN_OK; + + struct hifc_adm_dfx_cmd_s *buff_out = NULL; + struct unf_adm_cmd *buff_in = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, scsi_image_table, + return UNF_RETURN_ERROR); + + buff_in = (struct unf_adm_cmd *)v_input->buff_in; + buff_out = (struct hifc_adm_dfx_cmd_s *)v_input->buff_out; + + scsi_id = buff_in->arg[2]; + scsi_cmd_type = buff_in->arg[3]; + + if (scsi_id >= UNF_MAX_SCSI_ID || scsi_cmd_type >= UNF_MAX_SCSI_CMD) + return UNF_RETURN_ERROR; + + if (scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter) + buff_out->unresult.scsi_cmd_in = + atomic64_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->scsi_cmd_cnt[scsi_cmd_type]); + + return ret; +} + +static int unf_fc_host_scsi_cmd_in_total( + struct unf_hinicam_pkg *v_input, + struct unf_rport_scsi_id_image_s *scsi_image_table) +{ + unsigned int scsi_id = 0; + unsigned int scsi_cmd_type = 0; + + struct hifc_adm_dfx_cmd_s *buff_out = NULL; + struct unf_adm_cmd *buff_in = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, scsi_image_table, + return UNF_RETURN_ERROR); + + buff_in = (struct unf_adm_cmd *)v_input->buff_in; + buff_out = (struct hifc_adm_dfx_cmd_s *)v_input->buff_out; + + scsi_cmd_type = buff_in->arg[3]; + + if (scsi_cmd_type >= UNF_MAX_SCSI_CMD) + return UNF_RETURN_ERROR; + + for (scsi_id = 0; scsi_id < UNF_MAX_SCSI_ID; scsi_id++) { + if (!scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter) + continue; + buff_out->unresult.scsi_cmd_in += + atomic64_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->scsi_cmd_cnt[scsi_cmd_type]); + } + + return RETURN_OK; +} + +static int unf_fc_host_scsi_cmd_done_total( + struct unf_hinicam_pkg *v_input, + struct unf_rport_scsi_id_image_s *scsi_image_table) +{ + unsigned int scsi_id = 0; + unsigned int io_return_value = 0; + int ret = RETURN_OK; + + struct hifc_adm_dfx_cmd_s *buff_out = NULL; + struct unf_adm_cmd *buff_in = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, scsi_image_table, + return UNF_RETURN_ERROR); + + buff_in = (struct unf_adm_cmd *)v_input->buff_in; + buff_out = (struct hifc_adm_dfx_cmd_s *)v_input->buff_out; + + io_return_value = buff_in->arg[3]; + + if (io_return_value >= UNF_MAX_IO_RETURN_VALUE) + return UNF_RETURN_ERROR; + + for (scsi_id = 0; scsi_id < UNF_MAX_SCSI_ID; scsi_id++) { + if (!scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter) + continue; + buff_out->unresult.scsi_cmd_done += + atomic64_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->io_done_cnt[io_return_value]); + } + + return ret; +} + +static int unf_fc_session_scsi_cmd_done( + struct unf_hinicam_pkg *v_input, + struct unf_rport_scsi_id_image_s *scsi_image_table) +{ + unsigned int scsi_id = 0; + unsigned int io_return_value = 0; + int ret = RETURN_OK; + + struct 
hifc_adm_dfx_cmd_s *buff_out = NULL; + struct unf_adm_cmd *buff_in = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, scsi_image_table, + return UNF_RETURN_ERROR); + + buff_in = (struct unf_adm_cmd *)v_input->buff_in; + buff_out = (struct hifc_adm_dfx_cmd_s *)v_input->buff_out; + + scsi_id = buff_in->arg[2]; + io_return_value = buff_in->arg[3]; + + if (scsi_id >= UNF_MAX_SCSI_ID || + io_return_value >= UNF_MAX_IO_RETURN_VALUE) + return UNF_RETURN_ERROR; + + if (scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter) + buff_out->unresult.scsi_cmd_done = + atomic64_read(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->io_done_cnt[io_return_value]); + + return ret; +} + +static int unf_get_io_dfx_statistics(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input) +{ + int ret = RETURN_OK; + unsigned int counter_mode = 0; + struct hifc_adm_dfx_cmd_s *buff_out = NULL; + struct unf_adm_cmd *buff_in = NULL; + struct unf_admin_msg_head msg_head = { 0 }; + struct unf_rport_scsi_id_image_s *scsi_image_table = NULL; + unsigned int scsi_id = 0; + struct unf_lport_s *vport = NULL; + unsigned int buff_flag = 0; + + buff_flag = (!v_input) || (!v_input->buff_out) || + (!v_input->buff_in) || (!v_lport); + if (buff_flag) + return UNF_RETURN_ERROR; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + v_input->in_size >= sizeof(struct unf_adm_cmd), + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + *v_input->out_size >= sizeof(struct hifc_adm_dfx_cmd_s), + return UNF_RETURN_ERROR); + buff_in = (struct unf_adm_cmd *)v_input->buff_in; + buff_out = (struct hifc_adm_dfx_cmd_s *)v_input->buff_out; + msg_head.status = UNF_ADMIN_MSG_DONE; + + vport = unf_cm_lookup_vport_by_vp_index( + v_lport, (unsigned short)(buff_in->arg[4])); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, vport, + return UNF_RETURN_ERROR); + + scsi_image_table = &vport->rport_scsi_table; + FC_DRIVE_ACTION_CHECK((!scsi_image_table->wwn_rport_info_table), + (msg_head.status = UNF_ADMIN_MSG_FAILED), + (ret = UNF_RETURN_ERROR), + goto err); + + counter_mode = buff_in->arg[1]; + switch (counter_mode) { + case FC_HOST_COUNTER: + unf_fc_host_counter(vport, buff_out); + break; + case FC_SESSION_SCSI_CMD_IN: + ret = unf_fc_session_scsi_cmd_in(v_input, scsi_image_table); + break; + case FC_HOST_SCSI_CMD_IN_TOTAL: + ret = unf_fc_host_scsi_cmd_in_total(v_input, scsi_image_table); + break; + case FC_HOST_SCSI_CMD_DONE_TOTAL: + ret = unf_fc_host_scsi_cmd_done_total(v_input, + scsi_image_table); + break; + case FC_SESSION_SCSI_CMD_DONE: + ret = unf_fc_session_scsi_cmd_done(v_input, scsi_image_table); + break; + case FC_SESSION_COUNTER: + scsi_id = buff_in->arg[2]; + FC_DRIVE_ACTION_CHECK((scsi_id >= UNF_MAX_SCSI_ID), + (msg_head.status = UNF_ADMIN_MSG_FAILED), + (ret = UNF_RETURN_ERROR), + goto err); + unf_fc_session_counter(vport, scsi_id, buff_out); + break; + default: + msg_head.status = UNF_ADMIN_MSG_FAILED; + ret = UNF_RETURN_ERROR; + break; + } + + if (ret != RETURN_OK) + return ret; + +err: + msg_head.size = sizeof(struct unf_admin_msg_head); + *v_input->out_size = sizeof(struct hifc_adm_dfx_cmd_s); + memcpy(buff_out, &msg_head, sizeof(struct unf_admin_msg_head)); + + return ret; +} + +static int unf_cm_switch_dif(unsigned int v_option, + unsigned int v_dix_ip_checksum) +{ +#define UNF_WAIT_IO_COMPLETE_TIME_MS 5000 +#define UNF_WAIT_ONE_TIME_MS 100 +#define UNF_LOOP_TIMES (UNF_WAIT_IO_COMPLETE_TIME_MS / UNF_WAIT_ONE_TIME_MS) + 
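+	/* Summary of the switch procedure implemented below: the new
+	 * protection mode is applied in four passes over the global L_Port
+	 * list - close each port's SFP and wait up to
+	 * UNF_WAIT_IO_COMPLETE_TIME_MS for busy I/O to drain, unregister
+	 * every SCSI host, register the hosts again so the updated DIF/DIX
+	 * configuration is applied, and finally reopen the SFPs.
+	 */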
+ int ret = UNF_RETURN_ERROR; + struct unf_lport_s *lport = NULL; + unsigned long flags = 0; + int enable_dif; + unsigned int index; + + dix_flag = v_dix_ip_checksum ? UNF_TRUE : UNF_FALSE; + + enable_dif = (v_option >= UNF_ENABLE_DIF_DIX_PROT && + v_option <= UNF_ENABLE_DIX_PROT); + if (enable_dif) { + dif_sgl_mode = UNF_TRUE; + hifc_dif_enable = UNF_TRUE; + } + + switch (v_option) { + case UNF_DIF_ACTION_NONE: + dif_sgl_mode = UNF_FALSE; + hifc_dif_enable = UNF_FALSE; + hifc_dif_type = 0; + hifc_guard = 0; + break; + + case UNF_ENABLE_DIF_DIX_PROT: + hifc_dif_type = SHOST_DIF_TYPE1_PROTECTION | + SHOST_DIX_TYPE1_PROTECTION; + break; + + case UNF_ENABLE_DIF_PROT: + hifc_dif_type = SHOST_DIF_TYPE1_PROTECTION; + dif_sgl_mode = UNF_FALSE; + break; + + case UNF_ENABLE_DIX_PROT: + hifc_dif_type = SHOST_DIX_TYPE0_PROTECTION; + break; + + default: + return UNF_ADMIN_MSG_FAILED; + } + + /* 1. Close Lport's SFP */ + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); + list_for_each_entry(lport, &global_lport_mgr.list_lport_list_head, + entry_lport) { + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, + flags); + + ret = unf_cm_sfp_switch(lport->port_id, UNF_FALSE); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[err]Port(0x%x) close SFP failed in DIF switch", + lport->port_id); + + return UNF_ADMIN_MSG_FAILED; + } + for (index = 0; index < UNF_LOOP_TIMES; index++) { + if (unf_busy_io_completed(lport) == UNF_TRUE) + break; + msleep(UNF_WAIT_ONE_TIME_MS); + } + + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, + flags); + } + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, + flags); + + /* 2. UnRegister the SCSI host of LPort, including its Vports */ + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); + list_for_each_entry(lport, &global_lport_mgr.list_lport_list_head, + entry_lport) { + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, + flags); + unf_unregister_scsi_host(lport); + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, + flags); + } + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags); + + /* 3. Register the SCSI host of LPort, including its Vports */ + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); + list_for_each_entry(lport, &global_lport_mgr.list_lport_list_head, + entry_lport) { + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, + flags); + if (unf_register_scsi_host(lport) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, "[warn]Port(0x%x) register scsi host failed in DIF switch", + lport->port_id); + return UNF_ADMIN_MSG_FAILED; + } + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, + flags); + } + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags); + + /* 4. 
Open Lport's SFP */ + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); + list_for_each_entry(lport, &global_lport_mgr.list_lport_list_head, + entry_lport) { + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, + flags); + + ret = unf_cm_sfp_switch(lport->port_id, UNF_TRUE); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[err]Port(0x%x) reopen SFP failed in DIF switch", + lport->port_id); + + return UNF_ADMIN_MSG_FAILED; + } + + spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, + flags); + } + spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags); + + return UNF_ADMIN_MSG_DONE; +} + +static int unf_cm_switch_app_ref_escape(unsigned int v_option) +{ + switch (v_option) { + case UNF_APP_REF_ESC_BOTH_NOT_CHECK: + dif_app_esc_check = HIFC_DIF_APP_REF_ESC_NOT_CHECK; + dif_ref_esc_check = HIFC_DIF_APP_REF_ESC_NOT_CHECK; + break; + + case UNF_APP_ESC_CHECK: + dif_app_esc_check = HIFC_DIF_APP_REF_ESC_CHECK; + dif_ref_esc_check = HIFC_DIF_APP_REF_ESC_NOT_CHECK; + break; + + case UNF_REF_ESC_CHECK: + dif_app_esc_check = HIFC_DIF_APP_REF_ESC_NOT_CHECK; + dif_ref_esc_check = HIFC_DIF_APP_REF_ESC_CHECK; + break; + + case UNF_APP_REF_ESC_BOTH_CHECK: + dif_app_esc_check = HIFC_DIF_APP_REF_ESC_CHECK; + dif_ref_esc_check = HIFC_DIF_APP_REF_ESC_CHECK; + break; + + default: + dif_app_esc_check = HIFC_DIF_APP_REF_ESC_CHECK; + dif_ref_esc_check = HIFC_DIF_APP_REF_ESC_CHECK; + break; + } + + return UNF_ADMIN_MSG_DONE; +} + +static int unf_cm_select_dif_mode(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input) +{ + unsigned int dif_mode = 0; + unsigned int option = 0; + unsigned int dix_ip_checksum = 0; + struct unf_adm_cmd *buff_in = NULL; + struct unf_adm_cmd *buff_out = NULL; + struct unf_admin_msg_head msg_head = { 0 }; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input->buff_out, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input->buff_in, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + v_input->in_size >= sizeof(struct unf_adm_cmd), + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + *v_input->out_size >= sizeof(struct unf_adm_cmd), + return UNF_RETURN_ERROR); + + buff_in = (struct unf_adm_cmd *)v_input->buff_in; + buff_out = (struct unf_adm_cmd *)v_input->buff_out; + msg_head.status = UNF_ADMIN_MSG_DONE; + dif_mode = buff_in->arg[0]; + option = buff_in->arg[1]; + dix_ip_checksum = buff_in->arg[2]; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]DIF mode(0x%x) sub option(0x%x 0x%x)", + dif_mode, option, dix_ip_checksum); + + switch (dif_mode) { + case UNF_SWITCH_DIF_DIX: + msg_head.status = + (unsigned short)unf_cm_switch_dif(option, + dix_ip_checksum); + break; + + case UNF_APP_REF_ESCAPE: + msg_head.status = + (unsigned short)unf_cm_switch_app_ref_escape(option); + break; + + default: + msg_head.status = UNF_ADMIN_MSG_FAILED; + goto end; + } + +end: + msg_head.size = sizeof(struct unf_admin_msg_head); + *v_input->out_size = sizeof(struct unf_adm_cmd); + memcpy(buff_out, &msg_head, sizeof(struct unf_admin_msg_head)); + + return RETURN_OK; +} + +static int unf_cm_set_dif(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input) +{ + unsigned int dif_switch = 0; + struct unf_adm_cmd *buff_in = NULL; + struct unf_adm_cmd *buff_out = NULL; + struct unf_admin_msg_head msg_head = { 0 }; + + 
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input->buff_out, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input->buff_in, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + v_input->in_size >= sizeof(struct unf_adm_cmd), + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + *v_input->out_size >= sizeof(struct unf_adm_cmd), + return UNF_RETURN_ERROR); + buff_in = (struct unf_adm_cmd *)v_input->buff_in; + buff_out = (struct unf_adm_cmd *)v_input->buff_out; + msg_head.status = UNF_ADMIN_MSG_DONE; + dif_switch = (buff_in->arg[0]) ? + UNF_ENABLE_DIF_DIX_PROT : UNF_DIF_ACTION_NONE; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]DIF switch is 0x%x", dif_switch); + + if (dif_switch == UNF_ENABLE_DIF_DIX_PROT) + msg_head.status = (unsigned short)unf_cm_switch_dif(dif_switch, + UNF_ENABLE_IP_CHECKSUM); + else + msg_head.status = (unsigned short)unf_cm_switch_dif(dif_switch, + UNF_DISABLE_IP_CHECKSUM); + + msg_head.size = sizeof(struct unf_admin_msg_head); + *v_input->out_size = sizeof(struct unf_adm_cmd); + memcpy(buff_out, &msg_head, sizeof(struct unf_admin_msg_head)); + + return RETURN_OK; +} + +static unsigned int unf_save_port_info(struct unf_lport_s *lport, + void *save_info_addr) +{ + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x2271, UNF_TRUE, save_info_addr, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x2271, UNF_TRUE, lport, return UNF_RETURN_ERROR); + + if (!lport->low_level_func.port_mgr_op.pfn_ll_port_config_set) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Port(0x%x)'s corresponding function is NULL.", + lport->port_id); + + return ret; + } + + ret = lport->low_level_func.port_mgr_op.pfn_ll_port_config_set( + lport->fc_port, + UNF_PORT_CFG_SAVE_HBA_INFO, (void *)save_info_addr); + + return ret; +} + +static unsigned int unf_save_port_base_info(struct unf_lport_s *lport, + void *v_save_info) +{ + struct unf_save_info_head_s *save_info_head = v_save_info; + struct unf_port_info_entry_s *sava_port_entry = NULL; + struct unf_low_level_port_mgr_op_s *port_mgr = NULL; + unsigned int cfg_speed = 0; + unsigned int topo_cfg = 0; + int fec = UNF_FALSE; + int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, lport, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, save_info_head, + return UNF_RETURN_ERROR); + + save_info_head->opcode = 0; + /* write information to up */ + save_info_head->type = UNF_PORT_BASE_INFO; /* port base info */ + save_info_head->entry_num = 1; + save_info_head->next = 0xffff; + + sava_port_entry = (struct unf_port_info_entry_s *) + ((void *)(save_info_head + 1)); + + port_mgr = &lport->low_level_func.port_mgr_op; + if (!port_mgr->pfn_ll_port_config_get) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "Port(0x%x)'s corresponding function is NULL.", + lport->nport_id); + + return UNF_RETURN_ERROR; + } + + /* get Bbscn */ + sava_port_entry->bb_scn = unf_low_level_bbscn(lport); + + /* get speed */ + port_mgr->pfn_ll_port_config_get(lport->fc_port, + UNF_PORT_CFG_GET_SPEED_CFG, + (void *)&cfg_speed); + sava_port_entry->speed = cfg_speed; + + /* get topo */ + port_mgr->pfn_ll_port_config_get(lport->fc_port, + UNF_PORT_CFG_GET_TOPO_CFG, + (void *)&topo_cfg); + sava_port_entry->topo = topo_cfg; + + /* get fec */ + port_mgr->pfn_ll_port_config_get(lport->fc_port, + UNF_PORT_CFG_GET_FEC, + (void 
*)&fec); + sava_port_entry->fec = fec; + + ret = (int)unf_save_port_info(lport, v_save_info); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_WARN, + "[warn]Port(0x%x) send mailbox fail", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + return RETURN_OK; +} + +unsigned int unf_cm_save_port_info(unsigned int v_port_id) +{ + unsigned int port_id = v_port_id; + struct unf_lport_s *lport = NULL; + struct unf_save_info_head_s *save_info = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + lport = unf_find_lport_by_port_id(port_id); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[err]Port(0x%x) can not be found", port_id); + + return ret; + } + + save_info = vmalloc(SAVE_PORT_INFO_LEN); + if (!save_info) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[err]Can't alloc buffer for saving port info"); + + return ret; + } + + /* 1 clean flush */ + memset(save_info, 0, SAVE_PORT_INFO_LEN); + + save_info->opcode = 2; /* notify up to clean flush */ + save_info->type = 0xf; + save_info->entry_num = 0; + save_info->next = 0xffff; + + ret = unf_save_port_info(lport, save_info); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "[warn]Port(0x%x) send mailbox fail", lport->port_id); + + vfree(save_info); + + return ret; + } + + /* 2 save port base information */ + memset(save_info, 0, SAVE_PORT_INFO_LEN); + + ret = unf_save_port_base_info(lport, save_info); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[err]Port(0x%x) save port base information failed", + lport->port_id); + + vfree(save_info); + + return ret; + } + + vfree(save_info); + + return ret; +} + +static void unf_handle_port_base_info(struct unf_lport_s *lport, + struct unf_port_info_entry_s *v_save_info) +{ + struct unf_port_info_entry_s *sava_port_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, lport, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_save_info, return); + + sava_port_entry = v_save_info; + + UNF_CHECK_VALID(INVALID_VALUE32, + UNF_TRUE, + (sava_port_entry->topo == UNF_TOP_LOOP_MASK) || + (sava_port_entry->topo == UNF_TOP_P2P_MASK) || + (sava_port_entry->topo == UNF_TOP_AUTO_MASK), + return); + + if (!lport->low_level_func.port_mgr_op.pfn_ll_port_config_set) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "Port(0x%x)'s corresponding function is NULL.", + lport->port_id); + return; + } + + ret = lport->low_level_func.port_mgr_op.pfn_ll_port_config_set( + lport->fc_port, + UNF_PORT_CFG_SET_HBA_BASE_INFO, (void *)sava_port_entry); + + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "Cannot set port base info"); + return; + } + + /* update bbsn cfg to Lport */ + lport->low_level_func.lport_cfg_items.bb_scn = sava_port_entry->bb_scn; + + lport->low_level_func.lport_cfg_items.port_topology = + sava_port_entry->topo; +} + +static unsigned int unf_recovery_save_info(struct unf_lport_s *lport, + void *v_save_info, + unsigned char v_type) +{ + struct unf_save_info_head_s *save_info_head = v_save_info; + void *info_entry = NULL; + int ret = 0; + unsigned short next_flag = 0; + unsigned char entry_num = 0; + unsigned char index = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, lport, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, save_info_head, + return UNF_RETURN_ERROR); + + do { + memset(save_info_head, 0, 
SAVE_PORT_INFO_LEN); + save_info_head->opcode = 1; + /* read information from up */ + save_info_head->type = v_type; + /* vport[qos] info */ + save_info_head->entry_num = 0xff; + save_info_head->next = next_flag; + + ret = (int)unf_save_port_info(lport, save_info_head); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, + UNF_WARN, + "[warn]Port(0x%x) send mailbox fail", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + next_flag = (unsigned short)save_info_head->next; + entry_num = (unsigned char)save_info_head->entry_num; + info_entry = save_info_head + 1; + + for (index = 0; index < entry_num; index++) { + switch (v_type) { + case UNF_PORT_BASE_INFO: + unf_handle_port_base_info(lport, info_entry); + info_entry = ((struct unf_port_info_entry_s *) + info_entry) + 1; + break; + + default: + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_EQUIP_ATT, + UNF_ERR, + "[err]Port(0x%x) handle message failed", + lport->port_id); + return UNF_RETURN_ERROR; + } + } + + } while (next_flag != 0xffff); + + return RETURN_OK; +} + +unsigned int unf_cm_get_save_info(struct unf_lport_s *v_lport) +{ + struct unf_lport_s *lport = NULL; + void *save_info = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_lport, + return UNF_RETURN_ERROR); + + lport = v_lport; + save_info = vmalloc(SAVE_PORT_INFO_LEN); + if (!save_info) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[err]Can't alloc buffer for saving port info"); + + return ret; + } + + /* 1 get port base information */ + ret = unf_recovery_save_info(lport, save_info, UNF_PORT_BASE_INFO); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "[warn]Port(0x%x) send mailbox fail", lport->port_id); + + vfree(save_info); + + return ret; + } + + vfree(save_info); + + return ret; +} + +int unf_get_link_lose_tmo(struct unf_lport_s *v_lport) +{ + unsigned int tmo_value = 0; + + if (!v_lport) + return UNF_LOSE_TMO; + + tmo_value = atomic_read(&v_lport->link_lose_tmo); + + if (!tmo_value) + tmo_value = UNF_LOSE_TMO; + + return (int)tmo_value; +} + +int unf_get_link_lose_tmo_from_up(struct unf_lport_s *v_lport, + struct unf_flash_link_tmo_s *v_link_tmo) +{ + int ret = UNF_RETURN_ERROR; + struct unf_flash_data_s flash_data; + + if (!v_lport || !v_link_tmo || (sizeof(struct unf_flash_data_s) + > HIFC_FLASH_DATA_MAX_LEN)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_KEVENT, + "[warn]get flas link tmo param check fail"); + return ret; + } + memset(&flash_data, 0, sizeof(struct unf_flash_data_s)); + + if (!v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_KEVENT, + "[warn]link tmo fun null"); + return ret; + } + if (v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get( + v_lport->fc_port, + UNF_PORT_CFG_GET_FLASH_DATA_INFO, &flash_data) != + RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_KEVENT, + "[warn]get link tmo from up fail"); + return ret; + } + ret = RETURN_OK; + memcpy(v_link_tmo, &flash_data.link_tmo, HIFC_FLASH_LINK_TMO_MAX_LEN); + + return ret; +} + +void unf_init_link_lose_tmo(struct unf_lport_s *v_lport) +{ + struct unf_flash_link_tmo_s flash_link_tmo; + unsigned int tmo; + + memset(&flash_link_tmo, 0, sizeof(struct unf_flash_link_tmo_s)); + + if (!v_lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "[warn]int link tmo param check fail"); + return; + } + if ((unf_get_link_lose_tmo_from_up(v_lport, 
&flash_link_tmo) == + RETURN_OK) && + (flash_link_tmo.writeflag == HIFC_MGMT_TMO_MAGIC_NUM)) { + tmo = (((unsigned int)flash_link_tmo.link_tmo3 << 24) | + ((unsigned int)flash_link_tmo.link_tmo2 << 16) | + ((unsigned int)flash_link_tmo.link_tmo1 << 8) | + flash_link_tmo.link_tmo0); + if (tmo > 600) + unf_set_link_lose_tmo(v_lport, UNF_LOSE_TMO); + else + atomic_set(&v_lport->link_lose_tmo, (int)tmo); + } else { + unf_set_link_lose_tmo(v_lport, UNF_LOSE_TMO); + } +} + +unsigned int unf_register_scsi_host(struct unf_lport_s *v_lport) +{ + struct unf_host_param_s host_param = { 0 }; + unf_scsi_host_s **p_scsi_host = NULL; + struct unf_lport_cfg_item_s *lport_cfg_items = NULL; + + UNF_CHECK_VALID(0x1359, TRUE, v_lport, return UNF_RETURN_ERROR); + + /* Point to -->> L_port->Scsi_host */ + p_scsi_host = &v_lport->host_info.p_scsi_host; + + lport_cfg_items = &v_lport->low_level_func.lport_cfg_items; + host_param.can_queue = (int)lport_cfg_items->max_queue_depth; + + /* Performance optimization */ + host_param.cmnd_per_lun = UNF_MAX_CMND_PER_LUN; + + host_param.sg_table_size = UNF_MAX_DMA_SEGS; + host_param.max_id = UNF_MAX_TARGET_NUMBER; + host_param.max_lun = UNF_DEFAULT_MAX_LUN; + host_param.max_channel = UNF_MAX_BUS_CHANNEL; + host_param.max_cmnd_len = UNF_MAX_SCSI_CMND_LEN; /* CDB-16 */ + host_param.dma_boundary = UNF_DMA_BOUNDARY; + host_param.max_sectors = UNF_MAX_SECTORS; + host_param.port_id = v_lport->port_id; + host_param.lport = v_lport; + host_param.pdev = &v_lport->low_level_func.dev->dev; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Port(0x%x) allocate scsi host: can queue(%u), command performance LUN(%u), max lun(%u)", + v_lport->port_id, host_param.can_queue, + host_param.cmnd_per_lun, host_param.max_lun); + + if (unf_alloc_scsi_host(p_scsi_host, &host_param) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) allocate scsi host failed", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "[event]Port(0x%x) allocate scsi host(0x%x) succeed", + v_lport->port_id, UNF_GET_SCSI_HOST_ID(*p_scsi_host)); + + return RETURN_OK; +} + +void unf_unregister_scsi_host(struct unf_lport_s *v_lport) +{ + unf_scsi_host_s *p_scsi_host = NULL; + unsigned int host_no = 0; + + UNF_REFERNCE_VAR(p_scsi_host); + UNF_CHECK_VALID(0x1360, TRUE, v_lport, return); + + p_scsi_host = v_lport->host_info.p_scsi_host; + + if (p_scsi_host) { + host_no = UNF_GET_SCSI_HOST_ID(p_scsi_host); + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[event]Port(0x%x) starting unregister scsi host(0x%x)", + v_lport->port_id, host_no); + + unf_free_scsi_host(p_scsi_host); + /* can`t set p_scsi_host for NULL, + * since it does`t alloc by itself + */ + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "[warn]Port(0x%x) unregister scsi host, invalid ScsiHost ", + v_lport->port_id); + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[event]Port(0x%x) unregister scsi host(0x%x) succeed", + v_lport->port_id, host_no); + + v_lport->destroy_step = UNF_LPORT_DESTROY_STEP_12_UNREG_SCSI_HOST; + + UNF_REFERNCE_VAR(p_scsi_host); + UNF_REFERNCE_VAR(host_no); +} + +unsigned int unf_cm_clear_flush(unsigned int v_port_id) +{ + unsigned int port_id = v_port_id; + struct unf_lport_s *lport = NULL; + struct unf_save_info_head_s *save_info = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + lport = unf_find_lport_by_port_id(port_id); + if (!lport) 
{ + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[err]Port(0x%x) can not be found", port_id); + + return ret; + } + + save_info = vmalloc(SAVE_PORT_INFO_LEN); + if (!save_info) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[err]Can't alloc buffer for saving port info"); + + return ret; + } + + /* 1 clean flush */ + memset(save_info, 0, SAVE_PORT_INFO_LEN); + + save_info->opcode = 2; /* notify up to clean flush */ + save_info->type = 0xf; + save_info->entry_num = 0; + save_info->next = 0xffff; + + ret = unf_save_port_info(lport, save_info); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "[warn]Port(0x%x) send mailbox fail", lport->port_id); + + vfree(save_info); + + return ret; + } + + vfree(save_info); + + return ret; +} + +static int unf_cm_save_data_mode(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input) +{ + int ret = UNF_RETURN_ERROR; + unsigned int save_data_mode = 0; + unsigned int port_id = 0; + void *out_buf = NULL; + struct unf_adm_cmd *buff_in = NULL; + struct unf_admin_msg_head msg_head = { 0 }; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input, + return UNF_RETURN_ERROR); + out_buf = v_input->buff_out; + buff_in = v_input->buff_in; + port_id = v_lport->port_id; + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, out_buf, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, buff_in, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + v_input->in_size >= sizeof(struct unf_adm_cmd), + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + *v_input->out_size >= sizeof(struct unf_adm_cmd), + return UNF_RETURN_ERROR); + + save_data_mode = buff_in->arg[0]; + + msg_head.status = UNF_ADMIN_MSG_DONE; + + if (save_data_mode == UNF_SAVA_INFO_MODE) { + ret = (int)unf_cm_save_port_info(port_id); + if (ret != RETURN_OK) + msg_head.status = UNF_ADMIN_MSG_FAILED; + } else if (save_data_mode == UNF_CLEAN_INFO_MODE) { + ret = (int)unf_cm_clear_flush(port_id); + if (ret != RETURN_OK) + msg_head.status = UNF_ADMIN_MSG_FAILED; + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_MAJOR, + "[err]This mode(0x%x) is unknown", save_data_mode); + msg_head.status = UNF_ADMIN_MSG_FAILED; + } + + msg_head.size = sizeof(struct unf_admin_msg_head); + *v_input->out_size = sizeof(struct unf_adm_cmd); + memcpy(out_buf, &msg_head, sizeof(struct unf_admin_msg_head)); + + return ret; +} + +int unf_cmd_adm_handler(void *v_lport, struct unf_hinicam_pkg *v_input) +{ + struct unf_lport_s *lport = NULL; + int ret = UNF_RETURN_ERROR; + enum unf_msg_format_e msg_formate; + unsigned int index = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_lport, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_input, + return UNF_RETURN_ERROR); + lport = (struct unf_lport_s *)v_lport; + msg_formate = v_input->msg_format; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Enter HIFC_Adm, msg_formate=0x%x, 0x%x", + msg_formate, *v_input->out_size); + + /* hifcadm event */ + while (index < (sizeof(unf_hifcadm_action) / + sizeof(struct unf_hifcadm_action_s))) { + if ((msg_formate == unf_hifcadm_action[index].hifc_action) && + unf_hifcadm_action[index].fn_unf_hifc_action) { + ret = unf_hifcadm_action[index].fn_unf_hifc_action(lport, v_input); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EVENT, + UNF_WARN, + "[warn]Port(0x%x) process up msg(0x%x) failed", + lport->port_id, msg_formate); + } + return 
ret; + } + index++; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_EVENT, UNF_KEVENT, + "[event]Port(0x%x) not support adm cmd, msg type(0x%x) ", + lport->port_id, msg_formate); + + return ret; +} diff --git a/drivers/scsi/huawei/hifc/unf_portman.h b/drivers/scsi/huawei/hifc/unf_portman.h new file mode 100644 index 000000000000..6c6083889167 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_portman.h @@ -0,0 +1,305 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __UNF_PORT_MAN_H__ +#define __UNF_PORT_MAN_H__ + +#define UNF_LPORT_POLL_TIMER ((unsigned int)(1 * 1000)) + +#define UNF_MAX_BBSCN_VALUE 14 +#define UNF_SAVA_INFO_MODE 0 +#define UNF_CLEAN_INFO_MODE 1 + +#define FC_DRIVE_ACTION_CHECK(condition, fail_do0, fail_do1, return) \ + do { \ + if (condition) { \ + fail_do0; \ + fail_do1; \ + return; \ + } \ + } while (0) + +/* Used in hifcadm tool */ +#define UNF_ENABLE_DIF_DIX_PROT 1 +#define UNF_ENABLE_DIF_PROT 2 +#define UNF_ENABLE_DIX_PROT 3 + +#define UNF_DISABLE_IP_CHECKSUM 0 +#define UNF_ENABLE_IP_CHECKSUM 1 + +#define UNF_APP_REF_ESC_BOTH_NOT_CHECK 0 +#define UNF_APP_ESC_CHECK 1 +#define UNF_REF_ESC_CHECK 2 +#define UNF_APP_REF_ESC_BOTH_CHECK 3 + +struct unf_global_card_thread_s { + struct list_head list_card_list_head; + spinlock_t global_card_list_lock; + unsigned int card_sum; +}; + +/* Global L_Port MG,manage all L_Port */ +struct unf_global_lport_s { + struct list_head list_lport_list_head; + + /* Temporary list,used in hold list traverse */ + struct list_head list_intergrad_head; + + /* destroy list,used in card remove */ + struct list_head list_destroy_head; + + /* Dirty list,abnormal port */ + struct list_head list_dirty_head; + spinlock_t global_lport_list_lock; + unsigned int lport_sum; + unsigned char dft_mode; + int b_start_work; +}; + +struct unf_reset_port_argin { + unsigned int port_id; +}; + +struct unf_get_topo_argout { + unsigned int *topo_cfg; + enum unf_act_topo_e *en_act_topo; +}; + +struct unf_set_topo_argin { + unsigned int port_id; + unsigned int topo; +}; + +struct unf_set_bbscn_argin { + unsigned int port_id; + unsigned int bb_scn; +}; + +struct unf_set_sfp_argin { + unsigned int port_id; + int turn_on; +}; + +struct unf_set_speed_argin { + unsigned int port_id; + unsigned int *speed; +}; + +struct unf_get_sfp_argout { + unsigned int *status; + union unf_sfp_eeprome_info *sfp_info; +}; + +struct unf_get_allinfo_argout { + unsigned int *out_size; + unsigned int in_size; + void *out_buf; + void *in_buf; + void *lport; +}; + +struct unf_port_action_s { + unsigned int action; + unsigned int (*fn_unf_action)(struct unf_lport_s *v_lport, + void *v_input); +}; + +struct unf_hifcadm_action_s { + unsigned int hifc_action; + int (*fn_unf_hifc_action)(struct unf_lport_s *v_lport, + struct unf_hinicam_pkg *v_input); +}; + +struct unf_lport_info { +#define NPIVMAX 255 + unsigned int port_id; + unsigned int options; + int b_start_work; + unsigned int phy_link; + unsigned int link_up; + unsigned int act_speed; + unsigned int cfg_speed; + unsigned int tape_support; + unsigned long long port_name; + unsigned int msi; + unsigned int ini_io_retry_timeout; + unsigned int support_max_npiv_num; + unsigned int act_topo; + unsigned int port_topology; + unsigned int fc_ser_max_speed; + unsigned int loss_of_signal_count; + unsigned int bad_rx_char_count; + unsigned int loss_of_sync_count; + unsigned int link_fail_count; + unsigned int rx_eo_fa_count; + unsigned int 
dis_frame_count; + unsigned int bad_crc_count; + unsigned int proto_error_count; + unsigned int cfg_led_mode; + unsigned char chip_type; + unsigned char vport_num; + unsigned short rsvd1; + unsigned int vport_id[NPIVMAX]; + unsigned int nport_id; +}; + +struct unf_admin_msg_head { + unsigned int size; + unsigned short status; + unsigned char success_num; + unsigned char rsvd; +}; + +#define UNF_PORT_INFO_SIZE 10 + +struct unf_adm_cmd { + struct unf_admin_msg_head msg_head; + unsigned int arg[UNF_PORT_INFO_SIZE]; +}; + +struct unf_adm_xchg { + unsigned int aborted; + unsigned int ini_busy; + unsigned int tgt_busy; + unsigned int delay; + unsigned int free; + unsigned int wait; + unsigned int sfs_free; + unsigned int sfs_busy; +}; + +enum unf_admin_msg_status_e { + UNF_ADMIN_MSG_DONE = 0, + UNF_ADMIN_MSG_INCOMPLETE, + UNF_ADMIN_MSG_FAILED, + UNF_ADMIN_MSG_BUTT +}; + +/* the structure define with fc unf driver */ +enum fc_dfx_io_count_type_e { + FC_HOST_COUNTER = 0, + FC_HOST_SCSI_CMD_IN_TOTAL, + FC_HOST_SCSI_CMD_DONE_TOTAL, + FC_SESSION_COUNTER, + FC_SESSION_SCSI_CMD_IN, + FC_SESSION_SCSI_CMD_DONE, + FC_SRB_COUNT, +}; + +enum unf_msg_format_e { + UNF_PORT_SET_OP = 1, + UNF_TOPO_SET_OP, + UNF_SPEED_SET_OP, + UNF_INFO_GET_OP, + UNF_INFO_CLEAR_OP, + UNF_SFP_INFO_OP, + UNF_DFX, + UNF_FEC_SET = 8, + UNF_BBSCN, + UNF_VPORT, + UNF_LINK_DELAY = 11, + UNF_DIF, + UNF_DIF_CONFIG = 14, + UNF_SAVA_DATA, + UNF_SHOW_XCHG = 23, + UNF_PORTSTAT = 24, + UNF_ALL_INFO_OP = 25, + FC_LINK_TMO_OPT = 26, + FC_DRV_LOG_OPT = 27, + UNF_COMPAT_TEST = 0xFF +}; + +struct unf_save_info_head_s { + unsigned int opcode : 4; + unsigned int type : 4; + unsigned int entry_num : 8; + unsigned int next : 16; +}; + +enum unf_save_info_type_e { + UNF_SESSION_QOS = 0, + UNF_PORT_BASE_INFO = 2, + UNF_SAVE_TYPE_BUTT, +}; + +struct unf_link_tmo_opt_s { + struct unf_admin_msg_head head; + unsigned int link_opt; + int tmo_value; + unsigned int sync_all_port; +}; + +struct unf_log_level_opt_s { + struct unf_admin_msg_head head; + unsigned int log_opt; + unsigned int log_level; + unsigned int log_fre_qunce; +}; + +extern struct unf_global_lport_s global_lport_mgr; +extern struct unf_global_card_thread_s card_thread_mgr; +extern struct workqueue_struct *unf_work_queue; + +struct unf_lport_s *unf_find_lport_by_port_id(unsigned int v_port_id); +struct unf_lport_s *unf_find_lport_by_scsi_host_id(unsigned int scsi_host_id); +void *unf_lport_create_and_init( + void *private_data, + struct unf_low_level_function_op_s *low_level_op); +int unf_cm_reset_port(unsigned int v_port_id); +int unf_cm_sfp_switch(unsigned int v_port_id, int v_bturn_on); +int unf_cm_get_sfp_info(unsigned int v_port_id, unsigned int *v_status, + union unf_sfp_eeprome_info *v_sfp_info, + unsigned int *sfp_type); +int unf_cm_set_port_bbscn(unsigned int v_port_id, unsigned int v_bbscn); +int unf_cm_set_port_topo(unsigned int v_port_id, unsigned int v_topo); +int unf_cm_get_port_topo(unsigned int v_port_id, + unsigned int *v_topo_cfg, + enum unf_act_topo_e *v_en_act_topo); +int unf_cm_clear_port_error_code_sum(unsigned int v_port_id); +unsigned int unf_fc_port_link_event(void *v_lport, unsigned int v_events, + void *v_input); +unsigned int unf_release_local_port(void *v_lport); +void unf_lport_route_work(struct work_struct *v_work); +void unf_lport_update_topo(struct unf_lport_s *v_lport, + enum unf_act_topo_e v_enactive_topo); +void unf_lport_ref_dec(struct unf_lport_s *v_lport); +unsigned int unf_lport_refinc(struct unf_lport_s *v_lport); +void 
unf_lport_ref_dec_to_destroy(struct unf_lport_s *v_lport); +int unf_send_event(unsigned int port_id, unsigned int syn_flag, + void *argc_in, void *argc_out, + int (*p_func)(void *argc_in, void *argc_out)); +void unf_port_mgmt_deinit(void); +void unf_port_mgmt_init(void); +int unf_cm_echo_test(unsigned int v_port_id, unsigned int v_nport_id, + unsigned int *v_link_delay); +void unf_show_dirty_port(int v_show_only, unsigned int *v_ditry_port_num); +unsigned int unf_get_error_code_sum(struct unf_lport_s *v_lport, + struct unf_err_code_s *v_fc_err_code); +int unf_cm_set_port_speed(unsigned int v_port_id, unsigned int *v_speed); +void *unf_lookup_lport_by_nport_id(void *v_lport, unsigned int v_nport_id); +int unf_cmd_adm_handler(void *v_lport, struct unf_hinicam_pkg *v_input); +unsigned int unf_is_lport_valid(struct unf_lport_s *v_lport); +unsigned int unf_cm_save_port_info(unsigned int v_port_id); +unsigned int unf_cm_get_save_info(struct unf_lport_s *v_lport); +unsigned int unf_cm_clear_flush(unsigned int v_port_id); +int unf_lport_reset_port(struct unf_lport_s *v_lport, unsigned int v_flag); +unsigned int unf_register_scsi_host(struct unf_lport_s *v_lport); +void unf_unregister_scsi_host(struct unf_lport_s *v_lport); +int unf_get_link_lose_tmo(struct unf_lport_s *v_lport); +int unf_set_link_lose_tmo(struct unf_lport_s *v_lport, int time_out); +void unf_init_link_lose_tmo(struct unf_lport_s *v_lport); +int unf_set_link_lose_tmo_to_all(int time_out); +void unf_destroy_scsi_id_table(struct unf_lport_s *v_lport); +unsigned int unf_lport_login(struct unf_lport_s *v_lport, + enum unf_act_topo_e v_en_act_topo); +unsigned int unf_init_scsi_id_table(struct unf_lport_s *v_lport); +void unf_set_lport_removing(struct unf_lport_s *v_lport); +void unf_lport_release_lw_fun_op(struct unf_lport_s *v_lport); +void unf_disc_state_ma(struct unf_lport_s *v_lport, + enum unf_disc_event_e v_event); +unsigned int unf_init_lport_mgr_temp(struct unf_lport_s *v_lport); +void unf_release_lport_mgr_temp(struct unf_lport_s *v_lport); + +#endif
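unf_cmd_adm_handler() above resolves each hifcadm request by scanning the unf_hifcadm_action table until the message format matches an entry with a non-NULL handler; unf_portman.h declares that entry type as struct unf_hifcadm_action_s. The following is a minimal, self-contained sketch of the same table-driven dispatch, kept outside the patch itself; the names adm_action, handle_port_stat and adm_dispatch are illustrative only and do not exist in the driver.

/*
 * Illustrative sketch of the {action, handler} dispatch used by
 * unf_cmd_adm_handler(): walk the table linearly and invoke the first
 * matching non-NULL handler. Names below are hypothetical.
 */
#include <stdio.h>

#define RETURN_OK    0
#define RETURN_ERROR (~0)

struct adm_action {
	unsigned int action;                  /* message format code */
	int (*handler)(unsigned int action);  /* handler callback */
};

static int handle_port_stat(unsigned int action)
{
	printf("handled hifcadm action 0x%x\n", action);
	return RETURN_OK;
}

static const struct adm_action adm_table[] = {
	{ 24, handle_port_stat },	/* 24 == UNF_PORTSTAT in unf_portman.h */
};

static int adm_dispatch(unsigned int action)
{
	unsigned int i;

	for (i = 0; i < sizeof(adm_table) / sizeof(adm_table[0]); i++) {
		if (action == adm_table[i].action && adm_table[i].handler)
			return adm_table[i].handler(action);
	}
	return RETURN_ERROR;	/* no entry matched: unknown command */
}

int main(void)
{
	return adm_dispatch(24) == RETURN_OK ? 0 : 1;
}

Extending the mechanism only requires appending a table entry and its handler, which is why the driver routes every hifcadm command through this single loop.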
From: Chenguangli chenguangli2@huawei.com
driver inclusion
category: feature
bugzilla: NA
-----------------------------------------------------------------------
This module maintains the hifc driver's port resources, including the HBA, Lport, Rport, queues, and NPIV.
Signed-off-by: Chenguangli chenguangli2@huawei.com Reviewed-by: Zengweiliang zengweiliang.zengweiliang@huawei.com Acked-by: Hanjun Guo guohanjun@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/scsi/huawei/hifc/hifc_chipitf.c | 2095 +++++++ drivers/scsi/huawei/hifc/hifc_chipitf.h | 643 +++ drivers/scsi/huawei/hifc/hifc_hba.c | 1627 ++++++ drivers/scsi/huawei/hifc/hifc_hba.h | 234 + drivers/scsi/huawei/hifc/hifc_queue.c | 7020 +++++++++++++++++++++++ drivers/scsi/huawei/hifc/hifc_queue.h | 1363 +++++ drivers/scsi/huawei/hifc/unf_lport.c | 1129 ++++ drivers/scsi/huawei/hifc/unf_lport.h | 569 ++ drivers/scsi/huawei/hifc/unf_npiv.c | 1481 +++++ drivers/scsi/huawei/hifc/unf_npiv.h | 50 + drivers/scsi/huawei/hifc/unf_rport.c | 2430 ++++++++ drivers/scsi/huawei/hifc/unf_rport.h | 285 + 12 files changed, 18926 insertions(+) create mode 100644 drivers/scsi/huawei/hifc/hifc_chipitf.c create mode 100644 drivers/scsi/huawei/hifc/hifc_chipitf.h create mode 100644 drivers/scsi/huawei/hifc/hifc_hba.c create mode 100644 drivers/scsi/huawei/hifc/hifc_hba.h create mode 100644 drivers/scsi/huawei/hifc/hifc_queue.c create mode 100644 drivers/scsi/huawei/hifc/hifc_queue.h create mode 100644 drivers/scsi/huawei/hifc/unf_lport.c create mode 100644 drivers/scsi/huawei/hifc/unf_lport.h create mode 100644 drivers/scsi/huawei/hifc/unf_npiv.c create mode 100644 drivers/scsi/huawei/hifc/unf_npiv.h create mode 100644 drivers/scsi/huawei/hifc/unf_rport.c create mode 100644 drivers/scsi/huawei/hifc/unf_rport.h
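The hifc_chipitf.c file added below repeats a single mailbox idiom for every firmware command: fill an inbox structure with the command type and its length in dwords, send it synchronously, then reject the reply unless both its status field and its returned command type (the corresponding *_STS value) match the request, freeing the reply buffer on every path. The sketch below illustrates that request/response validation under assumed, hypothetical types and a stubbed send_sync() transport; it is not the driver's API.

/*
 * Illustrative sketch of the mailbox request/response validation pattern in
 * hifc_chipitf.c (hifc_get_chip_msg, hifc_rw_reg, ...). Types, opcodes and
 * send_sync() are hypothetical; only the checking order
 * (send -> status -> reply command type) mirrors the driver.
 */
#include <stdlib.h>

#define RETURN_OK    0u
#define RETURN_ERROR (~0u)

#define CMD_GET_CHIP_INFO     0x10	/* example request type, not the real opcode */
#define CMD_GET_CHIP_INFO_STS 0x11	/* example reply type, not the real opcode */

struct mbox_header {
	unsigned short cmnd_type;
	unsigned short length;		/* length in dwords, as in the driver */
};

struct mbox_reply {
	struct mbox_header header;
	unsigned int status;
	unsigned long long wwpn;
};

/* Stub transport: a real driver hands both buffers to management firmware. */
static unsigned int send_sync(const void *in, unsigned short in_size,
			      struct mbox_reply *out)
{
	(void)in;
	(void)in_size;
	out->header.cmnd_type = CMD_GET_CHIP_INFO_STS;
	out->status = RETURN_OK;
	return RETURN_OK;
}

static unsigned int get_chip_info(unsigned long long *wwpn)
{
	struct mbox_header req = { CMD_GET_CHIP_INFO, sizeof(req) / 4 };
	struct mbox_reply *rsp;
	unsigned int ret = RETURN_ERROR;

	rsp = calloc(1, sizeof(*rsp));
	if (!rsp)
		return RETURN_ERROR;

	if (send_sync(&req, sizeof(req), rsp) != RETURN_OK)
		goto exit;	/* transport failure */
	if (rsp->status != RETURN_OK)
		goto exit;	/* firmware rejected the command */
	if (rsp->header.cmnd_type != CMD_GET_CHIP_INFO_STS)
		goto exit;	/* reply does not match the request */

	*wwpn = rsp->wwpn;
	ret = RETURN_OK;
exit:
	free(rsp);
	return ret;
}

int main(void)
{
	unsigned long long wwpn = 0;

	return get_chip_info(&wwpn) == RETURN_OK ? 0 : 1;
}

Validating the reply's command type in addition to its status guards against consuming a reply that does not correspond to the command just issued.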
diff --git a/drivers/scsi/huawei/hifc/hifc_chipitf.c b/drivers/scsi/huawei/hifc/hifc_chipitf.c new file mode 100644 index 000000000000..fe97f6468557 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_chipitf.c @@ -0,0 +1,2095 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Fabric Channel Linux driver + * Copyright(c) 2018 Huawei Technologies Co., Ltd + * + */ + +#include "unf_common.h" +#include "hifc_chipitf.h" + +#define HIFC_MBOX_TIME_SEC_MAX 60 + +#define HIFC_LINK_UP_COUNT 1 +#define HIFC_LINK_DOWN_COUNT 2 +#define HIFC_FC_DELETE_CMND_COUNT 3 + +#define HIFC_MBX_MAX_TIMEOUT 10000 + +static unsigned int hifc_recv_fc_link_up(struct hifc_hba_s *v_hba, + void *v_buf_in); +static unsigned int hifc_recv_fc_link_down(struct hifc_hba_s *v_hba, + void *v_buf_in); +static unsigned int hifc_recv_fc_del_cmd(struct hifc_hba_s *v_hba, + void *v_buf_in); +static unsigned int hifc_recv_fc_error(struct hifc_hba_s *v_hba, + void *v_buf_in); + +static struct hifc_up_2_drv_msg_handle_s up_msg_handle[] = { + { HIFC_MBOX_RECV_FC_LINKUP, hifc_recv_fc_link_up }, + { HIFC_MBOX_RECV_FC_LINKDOWN, hifc_recv_fc_link_down }, + { HIFC_MBOX_RECV_FC_DELCMD, hifc_recv_fc_del_cmd }, + { HIFC_MBOX_RECV_FC_ERROR, hifc_recv_fc_error } +}; + +void hifc_up_msg_2_driver_proc(void *v_hwdev_handle, void *v_pri_handle, + unsigned char v_cmd, void *v_buf_in, + unsigned short v_in_size, void *v_buf_out, + unsigned short *v_out_size) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int index = 0; + struct hifc_hba_s *hba = NULL; + struct hifc_mbox_header_s *mbx_header = NULL; + + HIFC_CHECK(INVALID_VALUE32, v_hwdev_handle, return); + HIFC_CHECK(INVALID_VALUE32, v_pri_handle, return); + HIFC_CHECK(INVALID_VALUE32, v_buf_in, return); + HIFC_CHECK(INVALID_VALUE32, v_buf_out, return); + HIFC_CHECK(INVALID_VALUE32, v_out_size, return); + + hba = (struct hifc_hba_s *)v_pri_handle; + if (!hba) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EVENT, UNF_ERR, + "[err]Hba is null"); + + return; + } + + mbx_header = (struct hifc_mbox_header_s *)v_buf_in; + if (mbx_header->cmnd_type != v_cmd) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EVENT, UNF_ERR, + "[err]Port(0x%x) cmd(0x%x) is not matched with header cmd type(0x%x)", + hba->port_cfg.port_id, v_cmd, + mbx_header->cmnd_type); + return; + } + + while (index < (sizeof(up_msg_handle) / + sizeof(struct hifc_up_2_drv_msg_handle_s))) { + if ((v_cmd == up_msg_handle[index].cmd) && + (up_msg_handle[index].pfn_hifc_msg_up2drv_handler)) { + ret = + up_msg_handle[index].pfn_hifc_msg_up2drv_handler( + hba, + v_buf_in); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EVENT, + UNF_ERR, + "[warn]Port(0x%x) process up cmd(0x%x) failed", + hba->port_cfg.port_id, v_cmd); + } + + /* Process Done & return */ + return; + } + index++; + } + + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EVENT, UNF_ERR, + "[err]Port(0x%x) process up cmd(0x%x) failed", + hba->port_cfg.port_id, v_cmd); + + PRINT_OUTBOUND_IOB(UNF_MAJOR, v_buf_in, ((unsigned int)v_in_size)); +} + +unsigned int hifc_get_chip_msg(void *v_hba, void *v_mac) +{ + struct hifc_hba_s *hba = NULL; + struct unf_get_chip_info_argout *wwn = NULL; + struct hifc_inbox_get_chip_info_s get_chip_info; + union hifc_outmbox_generic_u *chip_info_sts = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_mac, return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)v_hba; + wwn = (struct unf_get_chip_info_argout *)v_mac; + + memset(&get_chip_info, 0, sizeof(struct 
hifc_inbox_get_chip_info_s)); + + chip_info_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!chip_info_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(chip_info_sts, 0, sizeof(union hifc_outmbox_generic_u)); + + get_chip_info.header.cmnd_type = HIFC_MBOX_GET_CHIP_INFO; + get_chip_info.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inbox_get_chip_info_s)); + + if (hifc_mb_send_and_wait_mbox(hba, &get_chip_info, + sizeof(get_chip_info), chip_info_sts) != + RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "hifc can't send and wait mailbox, command type: 0x%x.", + get_chip_info.header.cmnd_type); + + goto exit; + } + + if (chip_info_sts->get_chip_info_sts.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) mailbox status incorrect status(0x%x) .", + hba->port_cfg.port_id, + chip_info_sts->get_chip_info_sts.status); + + goto exit; + } + + if (chip_info_sts->get_chip_info_sts.header.cmnd_type != + HIFC_MBOX_GET_CHIP_INFO_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) receive mailbox type incorrect type: 0x%x.", + hba->port_cfg.port_id, + chip_info_sts->get_chip_info_sts.header.cmnd_type); + + goto exit; + } + + wwn->board_type = chip_info_sts->get_chip_info_sts.board_type; + hba->card_info.card_type = chip_info_sts->get_chip_info_sts.board_type; + wwn->wwpn = chip_info_sts->get_chip_info_sts.wwpn; + wwn->wwnn = chip_info_sts->get_chip_info_sts.wwnn; + wwn->sys_mac = chip_info_sts->get_chip_info_sts.sys_mac; + + ret = RETURN_OK; +exit: + kfree(chip_info_sts); + return ret; +} + +unsigned int hifc_get_chip_capability(void *hw_dev_handle, + struct hifc_chip_info_s *v_chip_info) +{ + struct hifc_inbox_get_chip_info_s get_chip_info; + union hifc_outmbox_generic_u *chip_info_sts = NULL; + unsigned short out_size = 0; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, hw_dev_handle, return UNF_RETURN_ERROR); + + memset(&get_chip_info, 0, sizeof(struct hifc_inbox_get_chip_info_s)); + + chip_info_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!chip_info_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(chip_info_sts, 0, sizeof(union hifc_outmbox_generic_u)); + + get_chip_info.header.cmnd_type = HIFC_MBOX_GET_CHIP_INFO; + get_chip_info.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inbox_get_chip_info_s)); + out_size = sizeof(union hifc_outmbox_generic_u); + + if (hifc_msg_to_mgmt_sync(hw_dev_handle, HIFC_MOD_FC, + HIFC_MBOX_GET_CHIP_INFO, + (void *)&get_chip_info.header, + sizeof(struct hifc_inbox_get_chip_info_s), + (union hifc_outmbox_generic_u *)chip_info_sts, + &out_size, + (HIFC_MBX_MAX_TIMEOUT)) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "hifc can't send and wait mailbox, command type: 0x%x.", + HIFC_MBOX_GET_CHIP_INFO); + + goto exit; + } + + if (chip_info_sts->get_chip_info_sts.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port mailbox status incorrect status(0x%x) .", + chip_info_sts->get_chip_info_sts.status); + + goto exit; + } + + if (chip_info_sts->get_chip_info_sts.header.cmnd_type != + HIFC_MBOX_GET_CHIP_INFO_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port receive mailbox type incorrect type: 0x%x.", + 
chip_info_sts->get_chip_info_sts.header.cmnd_type); + + goto exit; + } + + v_chip_info->wwnn = chip_info_sts->get_chip_info_sts.wwnn; + v_chip_info->wwpn = chip_info_sts->get_chip_info_sts.wwpn; + v_chip_info->tape_support = (unsigned char) + chip_info_sts->get_chip_info_sts.tape_support; + ret = RETURN_OK; +exit: + kfree(chip_info_sts); + return ret; +} + +void hifc_get_red_info_by_rw_type(struct unf_rw_reg_param_s *param, + struct hifc_inmbox_get_reg_info_s *v_reg_info) +{ + if ((param->rw_type == UNF_READ) || + (param->rw_type == UNF_READ_64)) { + v_reg_info->op_code = 0; + } else if ((param->rw_type == UNF_WRITE) || + (param->rw_type == UNF_WRITE_64)) { + v_reg_info->op_code = 1; + } + + if ((param->rw_type == UNF_READ) || + (param->rw_type == UNF_WRITE)) { + v_reg_info->reg_len = 32; + } else if ((param->rw_type == UNF_READ_64) || + (param->rw_type == UNF_WRITE_64)) { + v_reg_info->reg_len = 64; + } +} + +unsigned int hifc_rw_reg(void *v_hba, void *v_params) +{ + struct hifc_hba_s *hba = NULL; + struct unf_rw_reg_param_s *param = NULL; + struct hifc_inmbox_get_reg_info_s reg_info; + union hifc_outmbox_generic_u *reg_info_sts = NULL; + unsigned int para_value_out_l = 0; + unsigned int para_value_out_h = 0; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_params, return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)v_hba; + param = (struct unf_rw_reg_param_s *)v_params; + + memset(®_info, 0, sizeof(struct hifc_inmbox_get_reg_info_s)); + reg_info_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!reg_info_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(reg_info_sts, 0, sizeof(union hifc_outmbox_generic_u)); + + hifc_get_red_info_by_rw_type(param, ®_info); + + reg_info.reg_addr = param->offset; + reg_info.reg_value_l32 = (param->value) & VALUEMASK_L; + reg_info.reg_value_h32 = ((param->value) & VALUEMASK_H) >> 32; + + reg_info.header.cmnd_type = HIFC_MBOX_REG_RW_MODE; + reg_info.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_get_reg_info_s)); + + if (hifc_mb_send_and_wait_mbox(hba, ®_info, + sizeof(reg_info), + reg_info_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "hifc can't send and wait mailbox, command type: 0x%x.", + reg_info.header.cmnd_type); + + goto exit; + } + + if (reg_info_sts->get_reg_info_sts.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) mailbox status incorrect status(0x%x) .", + hba->port_cfg.port_id, + reg_info_sts->get_reg_info_sts.status); + + goto exit; + } + + if (reg_info_sts->get_reg_info_sts.header.cmnd_type != + HIFC_MBOX_REG_RW_MODE_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) receive mailbox type incorrect type: 0x%x.", + hba->port_cfg.port_id, + reg_info_sts->get_reg_info_sts.header.cmnd_type); + + goto exit; + } + + para_value_out_l = reg_info_sts->get_reg_info_sts.reg_value_l32; + para_value_out_h = reg_info_sts->get_reg_info_sts.reg_value_h32; + param->value = (unsigned long long)para_value_out_l | + ((unsigned long long)para_value_out_h << 32); + + ret = RETURN_OK; +exit: + kfree(reg_info_sts); + return ret; +} + +unsigned int hifc_config_port_table(struct hifc_hba_s *v_hba) +{ + struct hifc_inbox_config_api_s config_api; + union hifc_outmbox_generic_u *out_mbox = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + 
HIFC_CHECK(INVALID_VALUE32, NULL != v_hba, return UNF_RETURN_ERROR); + + memset(&config_api, 0, sizeof(config_api)); + out_mbox = kmalloc(sizeof(union hifc_outmbox_generic_u), GFP_ATOMIC); + if (!out_mbox) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(out_mbox, 0, sizeof(union hifc_outmbox_generic_u)); + + config_api.header.cmnd_type = HIFC_MBOX_CONFIG_API; + config_api.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inbox_config_api_s)); + + config_api.op_code = UNDEFINEOPCODE; + + /* change switching top cmd of CM to the cmd that up recognize */ + /* if the cmd equals UNF_TOP_P2P_MASK sending in CM means that it + * should be changed into P2P top, LL using HIFC_TOP_NON_LOOP_MASK + */ + if ((unsigned char)v_hba->port_topo_cfg == UNF_TOP_P2P_MASK) { + config_api.topy_mode = 0x2; + /* if the cmd equals UNF_TOP_LOOP_MASK sending in CM means that it + * should be changed into loop top, LL using HIFC_TOP_LOOP_MASK + */ + } else if ((unsigned char)v_hba->port_topo_cfg == UNF_TOP_LOOP_MASK) { + config_api.topy_mode = 0x1; + + /* if the cmd equals UNF_TOP_AUTO_MASK sending in CM means that it + * should be changed into loop top, LL using HIFC_TOP_AUTO_MASK + */ + } else if ((unsigned char)v_hba->port_topo_cfg == UNF_TOP_AUTO_MASK) { + config_api.topy_mode = 0x0; + } else { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) topo cmd is error, command type: 0x%x", + v_hba->port_cfg.port_id, + (unsigned char)v_hba->port_topo_cfg); + + return UNF_RETURN_ERROR; + } + + /* About speed */ + config_api.sfp_speed = (unsigned char)(v_hba->port_speed_cfg); + config_api.max_speed = (unsigned char)(v_hba->max_support_speed); + + config_api.rx_bbcredit_32g = HIFC_LOWLEVEL_DEFAULT_32G_BB_CREDIT; + config_api.rx_bbcredit_16g = HIFC_LOWLEVEL_DEFAULT_16G_BB_CREDIT; + config_api.rx_bbcredit_842g = HIFC_LOWLEVEL_DEFAULT_842G_BB_CREDIT; + config_api.rdy_cnt_bf_fst_frm = HIFC_LOWLEVEL_DEFAULT_LOOP_BB_CREDIT; + config_api.esch_value_32g = HIFC_LOWLEVEL_DEFAULT_32G_ESCH_VALUE; + config_api.esch_value_16g = HIFC_LOWLEVEL_DEFAULT_16G_ESCH_VALUE; + config_api.esch_value_8g = HIFC_LOWLEVEL_DEFAULT_842G_ESCH_VALUE; + config_api.esch_value_4g = HIFC_LOWLEVEL_DEFAULT_842G_ESCH_VALUE; + config_api.esch_value_2g = HIFC_LOWLEVEL_DEFAULT_842G_ESCH_VALUE; + config_api.esch_bust_size = HIFC_LOWLEVEL_DEFAULT_ESCH_BUS_SIZE; + + /* default value:0xFF */ + config_api.hard_alpa = 0xFF; + memcpy(config_api.port_name, v_hba->sys_port_name, UNF_WWN_LEN); + + /* if only for slave, the value is 1; if participate master choosing, + * the value is 0 + */ + config_api.slave = v_hba->port_loop_role; + + /* 1:auto negotiate, 0:fixed mode negotiate */ + if (config_api.sfp_speed == 0) + config_api.auto_sneg = 0x1; + else + config_api.auto_sneg = 0x0; + + /* send & wait */ + if (hifc_mb_send_and_wait_mbox(v_hba, &config_api, + sizeof(config_api), + out_mbox) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[warn]Port(0x%x) HIFC can't send and wait mailbox, command type: 0x%x", + v_hba->port_cfg.port_id, + config_api.header.cmnd_type); + + goto exit; + } + + /* mailbox status check */ + if (out_mbox->config_api_sts.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[err]Port(0x%x) receive mailbox type(0x%x) with status(0x%x) error", + v_hba->port_cfg.port_id, + out_mbox->config_api_sts.header.cmnd_type, + out_mbox->config_api_sts.status); + + goto exit; + } + + 
/* RSP type check */ + if (out_mbox->config_api_sts.header.cmnd_type != + HIFC_MBOX_CONFIG_API_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[err]Port(0x%x) receive mailbox type(0x%x) error", + v_hba->port_cfg.port_id, + out_mbox->config_api_sts.header.cmnd_type); + + goto exit; + } + + ret = RETURN_OK; +exit: + kfree(out_mbox); + return ret; +} + +unsigned int hifc_port_switch(struct hifc_hba_s *v_hba, int turn_on) +{ + struct hifc_inbox_port_switch_s port_switch; + union hifc_outmbox_generic_u *port_switch_sts = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, NULL != v_hba, return UNF_RETURN_ERROR); + + memset(&port_switch, 0, sizeof(port_switch)); + + port_switch_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!port_switch_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(port_switch_sts, 0, sizeof(union hifc_outmbox_generic_u)); + + port_switch.header.cmnd_type = HIFC_MBOX_PORT_SWITCH; + port_switch.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inbox_port_switch_s)); + port_switch.op_code = (unsigned char)turn_on; + port_switch.port_type = (unsigned char)v_hba->port_type; + + /* set the value is 0 first, vn2vf mode, vlan discovery automatically */ + port_switch.host_id = 0; + port_switch.pf_id = + (unsigned char)(hifc_global_func_id(v_hba->hw_dev_handle)); + port_switch.fcoe_mode = HIFC_FIP_MODE_VN2VF; + port_switch.conf_vlan = 0xffff; + port_switch.sys_node_name = *(unsigned long long *)v_hba->sys_node_name; + port_switch.sys_port_wwn = *(unsigned long long *)v_hba->sys_port_name; + + /* send & wait mailbox */ + if (hifc_mb_send_and_wait_mbox(v_hba, &port_switch, sizeof(port_switch), + port_switch_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[warn]Port(0x%x) HIFC can't send and wait mailbox, command type(0x%x) opcode(0x%x)", + v_hba->port_cfg.port_id, + port_switch.header.cmnd_type, port_switch.op_code); + + goto exit; + } + + /* check mailbox rsp status */ + if (port_switch_sts->port_switch_sts.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[err]Port(0x%x) receive mailbox type(0x%x) status(0x%x) error", + v_hba->port_cfg.port_id, + port_switch_sts->port_switch_sts.header.cmnd_type, + port_switch_sts->port_switch_sts.status); + + goto exit; + } + + /* check mailbox rsp type */ + if (port_switch_sts->port_switch_sts.header.cmnd_type != + HIFC_MBOX_PORT_SWITCH_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[err]Port(0x%x) receive mailbox type(0x%x) error", + v_hba->port_cfg.port_id, + port_switch_sts->port_switch_sts.header.cmnd_type); + + goto exit; + } + + HIFC_TRACE(UNF_EVTLOG_LINK_SUC, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "[event]Port(0x%x) switch succeed, turns to %s", + v_hba->port_cfg.port_id, + (turn_on) ? 
"on" : "off"); + + ret = RETURN_OK; +exit: + kfree(port_switch_sts); + return ret; +} + +unsigned int hifc_config_login_api(struct hifc_hba_s *v_hba, + struct unf_port_login_parms_s *v_login_para) +{ +#define HIFC_LOOP_RDYNUM 8 + int async_ret = RETURN_OK; + unsigned int ret = UNF_RETURN_ERROR; + struct hifc_inmbox_config_login_s cfg_login; + union hifc_outmbox_generic_u *cfg_login_sts = NULL; + + HIFC_CHECK(INVALID_VALUE32, NULL != v_hba, return UNF_RETURN_ERROR); + + memset(&cfg_login, 0, sizeof(cfg_login)); + cfg_login_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!cfg_login_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(cfg_login_sts, 0, sizeof(union hifc_outmbox_generic_u)); + + cfg_login.header.cmnd_type = HIFC_MBOX_CONFIG_LOGIN_API; + cfg_login.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_config_login_s)); + cfg_login.header.port_id = v_hba->port_index; + + cfg_login.op_code = UNDEFINEOPCODE; + + cfg_login.tx_bb_credit = v_hba->remote_bbcredit; + + cfg_login.etov = v_hba->compared_edtov_val; + cfg_login.rtov = v_hba->compared_ratov_val; + + cfg_login.rt_tov_tag = v_hba->remote_rttov_tag; + cfg_login.ed_tov_tag = v_hba->remote_edtov_tag; + cfg_login.bb_credit = v_hba->remote_bbcredit; + cfg_login.bbscn = HIFC_LSB(v_hba->compared_bbscn); + + if (cfg_login.bbscn) { + cfg_login.lr_flag = + (v_login_para->els_cmnd_code == ELS_PLOGI) ? 0 : 1; + ret = hifc_mb_send_and_wait_mbox(v_hba, &cfg_login, + sizeof(cfg_login), + cfg_login_sts); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "Port(0x%x) HIFC can't send and wait mailbox, command type: 0x%x.", + v_hba->port_cfg.port_id, + cfg_login.header.cmnd_type); + + goto exit; + } + + if (cfg_login_sts->config_login_sts.header.cmnd_type != + HIFC_MBOX_CONFIG_LOGIN_API_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, + UNF_INFO, "Port(0x%x) Receive mailbox type incorrect. Type: 0x%x.", + v_hba->port_cfg.port_id, + cfg_login_sts->config_login_sts.header.cmnd_type); + + goto exit; + } + + if (cfg_login_sts->config_login_sts.status != STATUS_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, "Port(0x%x) Receive mailbox type(0x%x) status incorrect. 
Status: 0x%x.", + v_hba->port_cfg.port_id, + cfg_login_sts->config_login_sts.header.cmnd_type, + cfg_login_sts->config_login_sts.status); + + goto exit; + } + } else { + async_ret = hifc_msg_to_mgmt_async(v_hba->hw_dev_handle, + HIFC_MOD_FC, + HIFC_MBOX_CONFIG_LOGIN_API, + &cfg_login, + sizeof(cfg_login)); + + if (async_ret != 0) { + HIFC_MAILBOX_STAT(v_hba, + HIFC_SEND_CONFIG_LOGINAPI_FAIL); + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "Port(0x%x) hifc can't send config login cmd to up,ret:%d.", + v_hba->port_cfg.port_id, async_ret); + + goto exit; + } + + HIFC_MAILBOX_STAT(v_hba, HIFC_SEND_CONFIG_LOGINAPI); + } + + HIFC_TRACE(UNF_EVTLOG_LINK_SUC, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "Port(0x%x) Topo(0x%x) Config login param to up: txbbcredit(0x%x), BB_SC_N(0x%x).", + v_hba->port_cfg.port_id, v_hba->active_topo, + cfg_login.tx_bb_credit, cfg_login.bbscn); + + ret = RETURN_OK; +exit: + kfree(cfg_login_sts); + return ret; +} + +unsigned int hifc_mb_send_and_wait_mbox(struct hifc_hba_s *v_hba, + const void *v_in_mbox, + unsigned short in_size, + union hifc_outmbox_generic_u *out_mbox) +{ + void *handle = NULL; + unsigned short out_size = 0; + unsigned long time_out = 0; + int ret = 0; + struct hifc_mbox_header_s *header; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_in_mbox, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, out_mbox, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_hba->hw_dev_handle, + return UNF_RETURN_ERROR); + + header = (struct hifc_mbox_header_s *)v_in_mbox; + out_size = sizeof(union hifc_outmbox_generic_u); + handle = v_hba->hw_dev_handle; + + /* Wait for las mailbox completion: */ + time_out = wait_for_completion_timeout( + &v_hba->mbox_complete, + (unsigned long)msecs_to_jiffies(HIFC_MBOX_TIME_SEC_MAX * 1000)); + if (time_out == UNF_ZERO) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[err]Port(0x%x) wait mailbox(0x%x) completion timeout: %d sec", + v_hba->port_cfg.port_id, header->cmnd_type, + HIFC_MBOX_TIME_SEC_MAX); + + return UNF_RETURN_ERROR; + } + + /* Send Msg to uP Sync: timer 10s */ + ret = hifc_msg_to_mgmt_sync(handle, HIFC_MOD_FC, header->cmnd_type, + (void *)v_in_mbox, in_size, + (union hifc_outmbox_generic_u *)out_mbox, + &out_size, + HIFC_MBX_MAX_TIMEOUT); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[warn]Port(0x%x) can not send mailbox(0x%x) with ret:%d", + v_hba->port_cfg.port_id, header->cmnd_type, ret); + + complete(&v_hba->mbox_complete); + return UNF_RETURN_ERROR; + } + + complete(&v_hba->mbox_complete); + return RETURN_OK; +} + +unsigned short hifc_get_global_base_qpn(void *v_handle) +{ +#define NIC_UP_CMD_GET_GLOBAL_QPN 102 + + int ret = 0; + unsigned short out_size = 0; + struct hifc_get_global_base_qpn_s qpn_base = { 0 }; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_handle, + return INVALID_VALUE16); + qpn_base.func_id = hifc_global_func_id(v_handle); + out_size = (u16)sizeof(struct hifc_get_global_base_qpn_s); + + /* Send Msg to uP Sync: timer 10s */ + ret = hifc_msg_to_mgmt_sync(v_handle, + HIFC_MOD_L2NIC, + NIC_UP_CMD_GET_GLOBAL_QPN, + &qpn_base, + (u16)sizeof(qpn_base), + &qpn_base, + &out_size, + HIFC_MBX_MAX_TIMEOUT); + + if (ret || (!out_size) || qpn_base.status) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[warn]hifc_get_global_base_qpn failed, ret %d, out_size %u, qpn_info.ret%u", + ret, out_size, qpn_base.status); + + return 0xFFFF; + } + + return 
(u16)(qpn_base.base_qpn); +} + +void hifc_initial_dynamic_info(struct hifc_hba_s *v_fc_port) +{ + struct hifc_hba_s *hba = v_fc_port; + unsigned long flag = 0; + + HIFC_CHECK(INVALID_VALUE32, NULL != hba, return); + + spin_lock_irqsave(&hba->hba_lock, flag); + hba->active_port_speed = UNF_PORT_SPEED_UNKNOWN; + hba->active_topo = UNF_ACT_TOP_UNKNOWN; + hba->phy_link = UNF_PORT_LINK_DOWN; + hba->q_set_stage = HIFC_QUEUE_SET_STAGE_INIT; + hba->loop_map_valid = LOOP_MAP_INVALID; + hba->delay_info.srq_delay_flag = 0; + hba->delay_info.root_rq_rcvd_flag = 0; + spin_unlock_irqrestore(&hba->hba_lock, flag); +} + +unsigned int hifc_recv_fc_link_up(struct hifc_hba_s *v_hba, void *v_buf_in) +{ +#define HIFC_LOOP_MASK 0x1 +#define HIFC_LOOPMAP_COUNT 128 + + unsigned int ret = UNF_RETURN_ERROR; + struct hifc_link_event_s *buf_in = NULL; + + buf_in = (struct hifc_link_event_s *)v_buf_in; + v_hba->phy_link = UNF_PORT_LINK_UP; + v_hba->active_port_speed = buf_in->speed; + v_hba->led_states.green_speed_led = + (unsigned char)(buf_in->green_speed_led); + v_hba->led_states.yellow_speed_led = + (unsigned char)(buf_in->yellow_speed_led); + v_hba->led_states.ac_led = (unsigned char)(buf_in->acled); + + if ((buf_in->top_type == HIFC_LOOP_MASK) && + ((buf_in->loop_map_info[1] == UNF_FL_PORT_LOOP_ADDR) || + (buf_in->loop_map_info[2] == UNF_FL_PORT_LOOP_ADDR))) { + v_hba->active_topo = UNF_ACT_TOP_PUBLIC_LOOP; /* Public Loop */ + v_hba->active_al_pa = buf_in->alpa_value; /* AL_PA */ + memcpy(v_hba->loop_map, buf_in->loop_map_info, + HIFC_LOOPMAP_COUNT); + v_hba->loop_map_valid = LOOP_MAP_VALID; + } else if (buf_in->top_type == HIFC_LOOP_MASK) { + v_hba->active_topo = UNF_ACT_TOP_PRIVATE_LOOP;/* Private Loop */ + v_hba->active_al_pa = buf_in->alpa_value; /* AL_PA */ + memcpy(v_hba->loop_map, buf_in->loop_map_info, + HIFC_LOOPMAP_COUNT); + v_hba->loop_map_valid = LOOP_MAP_VALID; + } else { + v_hba->active_topo = UNF_TOP_P2P_MASK; /* P2P_D or P2P_F */ + } + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_EVENT, UNF_KEVENT, + "[event]Port(0x%x) receive link up event(0x%x) with speed(0x%x) uP_topo(0x%x) driver_topo(0x%x)", + v_hba->port_cfg.port_id, buf_in->link_event, + buf_in->speed, buf_in->top_type, v_hba->active_topo); + + /* Set clear & flush state */ + hifc_set_hba_flush_state(v_hba, UNF_FALSE); + hifc_set_root_sq_flush_state(v_hba, UNF_FALSE); + hifc_set_rport_flush_state(v_hba, UNF_FALSE); + + /* Report link up event to COM */ + UNF_LOWLEVEL_PORT_EVENT(ret, v_hba->lport, + UNF_PORT_LINK_UP, &v_hba->active_port_speed); + + HIFC_LINK_EVENT_STAT(v_hba, HIFC_LINK_UP_COUNT); + + return ret; +} + +unsigned int hifc_recv_fc_link_down(struct hifc_hba_s *v_hba, void *v_buf_in) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct hifc_link_event_s *buf_in = NULL; + + buf_in = (struct hifc_link_event_s *)v_buf_in; + + /* 1. Led state setting */ + v_hba->led_states.green_speed_led = + (unsigned char)(buf_in->green_speed_led); + v_hba->led_states.yellow_speed_led = + (unsigned char)(buf_in->yellow_speed_led); + v_hba->led_states.ac_led = (unsigned char)(buf_in->acled); + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_EVENT, UNF_KEVENT, + "[event]Port(0x%x) receive link down event(0x%x) reason(0x%x)", + v_hba->port_cfg.port_id, buf_in->link_event, buf_in->reason); + + hifc_initial_dynamic_info(v_hba); + + /* 2. set HBA flush state */ + hifc_set_hba_flush_state(v_hba, UNF_TRUE); + + /* 3. set Root SQ flush state */ + hifc_set_root_sq_flush_state(v_hba, UNF_TRUE); + + /* 4. 
set R_Port (parent SQ) flush state */ + hifc_set_rport_flush_state(v_hba, UNF_TRUE); + + /* 5. Report link down event to COM */ + UNF_LOWLEVEL_PORT_EVENT(ret, v_hba->lport, UNF_PORT_LINK_DOWN, 0); + + /* DFX setting */ + HIFC_LINK_REASON_STAT(v_hba, buf_in->reason); + HIFC_LINK_EVENT_STAT(v_hba, HIFC_LINK_DOWN_COUNT); + + return ret; +} + +unsigned int hifc_recv_fc_del_cmd(struct hifc_hba_s *v_hba, void *v_buf_in) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct hifc_link_event_s *buf_in = NULL; + + buf_in = (struct hifc_link_event_s *)v_buf_in; + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[event]Port(0x%x) receive delete cmd event(0x%x)", + v_hba->port_cfg.port_id, buf_in->link_event); + + /* Send buffer clear cmnd */ + ret = hifc_clear_fetched_sq_wqe(v_hba); + + v_hba->q_set_stage = HIFC_QUEUE_SET_STAGE_SCANNING; + HIFC_LINK_EVENT_STAT(v_hba, HIFC_FC_DELETE_CMND_COUNT); + + HIFC_REFERNCE_VAR(buf_in, buf_in, ret); + return ret; +} + +unsigned int hifc_recv_fc_error(struct hifc_hba_s *v_hba, void *v_buf_in) +{ +#define FC_ERR_LEVEL_DEAD 0 +#define FC_ERR_LEVEL_HIGH 1 +#define FC_ERR_LEVEL_LOW 2 + + unsigned int ret = UNF_RETURN_ERROR; + struct hifc_up_error_event_s *buf_in = NULL; + + buf_in = (struct hifc_up_error_event_s *)v_buf_in; + if (buf_in->error_type >= HIFC_UP_ERR_BUTT || + buf_in->error_value >= HIFC_ERR_VALUE_BUTT) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) receive a unsupported UP Error Event Type(0x%x) Value(0x%x).", + v_hba->port_cfg.port_id, + buf_in->error_type, + buf_in->error_value); + return ret; + } + + switch (buf_in->error_level) { + case FC_ERR_LEVEL_DEAD: + /* todo: chip reset */ + ret = RETURN_OK; + break; + + case FC_ERR_LEVEL_HIGH: + /* port reset */ + UNF_LOWLEVEL_PORT_EVENT(ret, v_hba->lport, + UNF_PORT_ABNORMAL_RESET, NULL); + break; + + case FC_ERR_LEVEL_LOW: + ret = RETURN_OK; + break; + + default: + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) receive a unsupported UP Error Event Level(0x%x), Can not Process.", + v_hba->port_cfg.port_id, buf_in->error_level); + return ret; + } + if (buf_in->error_value < HIFC_ERR_VALUE_BUTT) + HIFC_UP_ERR_EVENT_STAT(v_hba, buf_in->error_value); + + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_KEVENT, + "[event]Port(0x%x) process UP Error Event Level(0x%x) Type(0x%x) Value(0x%x) %s.", + v_hba->port_cfg.port_id, buf_in->error_level, + buf_in->error_type, buf_in->error_value, + (ret == UNF_RETURN_ERROR) ? 
"ERROR" : "OK"); + + HIFC_REFERNCE_VAR(buf_in, buf_in, ret); + + return ret; +} + +unsigned int hifc_get_topo_cfg(void *v_hba, void *v_topo_cfg) +{ + struct hifc_hba_s *hba = v_hba; + unsigned int *topo_cfg = v_topo_cfg; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_topo_cfg, return UNF_RETURN_ERROR); + + *topo_cfg = hba->port_topo_cfg; + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Get topology config: 0x%x.", + *topo_cfg); + + return RETURN_OK; +} + +unsigned int hifc_get_topo_act(void *v_hba, void *topo_act) +{ + struct hifc_hba_s *hba = v_hba; + enum unf_act_topo_e *ret_topo_act = topo_act; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, topo_act, return UNF_RETURN_ERROR); + + /* Get topo from low_level */ + *ret_topo_act = hba->active_topo; + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Get active topology: 0x%x", + *ret_topo_act); + + return RETURN_OK; +} + +unsigned int hifc_get_loop_alpa(void *v_hba, void *v_alpa) +{ + unsigned long flags = 0; + struct hifc_hba_s *hba = v_hba; + unsigned char *alpa = v_alpa; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_alpa, return UNF_RETURN_ERROR); + + spin_lock_irqsave(&hba->hba_lock, flags); + *alpa = hba->active_al_pa; + spin_unlock_irqrestore(&hba->hba_lock, flags); + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Get active AL_PA(0x%x)", *alpa); + + return RETURN_OK; +} + +unsigned int hifc_get_lport_led(void *v_hba, void *v_led_state) +{ + unsigned int ret = RETURN_OK; + struct hifc_hba_s *hba = v_hba; + struct hifc_led_state_s *led_state = v_led_state; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_led_state, return UNF_RETURN_ERROR); + + led_state->green_speed_led = hba->led_states.green_speed_led; + led_state->yellow_speed_led = hba->led_states.yellow_speed_led; + led_state->ac_led = hba->led_states.ac_led; + + return ret; +} + +unsigned int hifc_get_hardware_version(void *v_fc_port, void *v_version) +{ + struct hifc_hba_s *fc_port = (struct hifc_hba_s *)v_fc_port; + struct unf_version_str_s *version = + (struct unf_version_str_s *)v_version; + char *hard_ware_ver = NULL; + + HIFC_CHECK(INVALID_VALUE32, version, return UNF_RETURN_ERROR); + hard_ware_ver = version->buf; + HIFC_CHECK(INVALID_VALUE32, hard_ware_ver, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, fc_port, return UNF_RETURN_ERROR); + + hard_ware_ver[UNF_HW_VERSION_LEN - 1] = 0; + + return RETURN_OK; +} + +unsigned int hifc_get_sfp_info(void *v_fc_port, void *v_sfp_info) +{ + struct unf_lport_sfp_info *sfp_info = + (struct unf_lport_sfp_info *)v_sfp_info; + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_fc_port; + struct hifc_inmbox_get_sfp_info_s get_sfp_info; + union hifc_outmbox_generic_u *get_sfp_info_sts = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, sfp_info, return UNF_RETURN_ERROR); + + memset(&get_sfp_info, 0, sizeof(get_sfp_info)); + + get_sfp_info_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!get_sfp_info_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(get_sfp_info_sts, 0, sizeof(union hifc_outmbox_generic_u)); + + get_sfp_info.header.cmnd_type = HIFC_MBOX_GET_SFP_INFO; + 
get_sfp_info.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_get_sfp_info_s)); + get_sfp_info.header.port_id = (hba->port_index); + + /* send mailbox and handle the return sts */ + if (hifc_mb_send_and_wait_mbox(hba, &get_sfp_info, sizeof(get_sfp_info), + get_sfp_info_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "Port(0x%x) HIFC can't send and wait mailbox, command type: 0x%x.", + hba->port_cfg.port_id, + get_sfp_info.header.cmnd_type); + + goto exit; + } + + sfp_info->status = get_sfp_info_sts->get_sfp_info_sts.status; + if (get_sfp_info_sts->get_sfp_info_sts.status != STATUS_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "Port(0x%x) Receive mailbox type(0x%x) status incorrect. Status: 0x%x.", + hba->port_cfg.port_id, + get_sfp_info_sts->get_sfp_info_sts.header.cmnd_type, + get_sfp_info_sts->get_sfp_info_sts.status); + + goto exit; + } + + if (get_sfp_info_sts->get_sfp_info_sts.header.cmnd_type != + HIFC_MBOX_GET_SFP_INFO_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "Port(0x%x) Receive mailbox type incorrect. Type: 0x%x.", + hba->port_cfg.port_id, + get_sfp_info_sts->get_sfp_info_sts.header.cmnd_type); + + goto exit; + } + + /* the real sfpinfo is beyond the header of sts */ + memcpy(&sfp_info->sfp_eeprom_info, + ((unsigned char *)get_sfp_info_sts + + sizeof(get_sfp_info_sts->get_sfp_info_sts)), + sizeof(union unf_sfp_eeprome_info)); + + ret = RETURN_OK; +exit: + kfree(get_sfp_info_sts); + return ret; +} + +unsigned int hifc_get_port_info(void *v_hba) +{ + unsigned long flags = 0; + struct hifc_inmbox_get_port_info_s get_port_info; + union hifc_outmbox_generic_u *port_info_sts = NULL; + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba; + unsigned int ret = UNF_RETURN_ERROR; + + memset(&get_port_info, 0, sizeof(get_port_info)); + port_info_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!port_info_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(port_info_sts, 0, sizeof(union hifc_outmbox_generic_u)); + + get_port_info.header.cmnd_type = HIFC_MBOX_GET_PORT_INFO; + get_port_info.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_get_port_info_s)); + get_port_info.header.port_id = hba->port_index; + + if (hifc_mb_send_and_wait_mbox(hba, &get_port_info, + sizeof(get_port_info), port_info_sts) != + RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "Port(0x%x) send and wait mailbox type(0x%x) failed.", + hba->port_cfg.port_id, + get_port_info.header.cmnd_type); + + goto exit; + } + + if ((port_info_sts->get_port_info_sts.status != STATUS_OK) || + (port_info_sts->get_port_info_sts.header.cmnd_type != + HIFC_MBOX_GET_PORT_INFO_STS)) { + HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "Port(0x%x) receive mailbox type(0x%x) status(0x%x) error.", + hba->port_cfg.port_id, + port_info_sts->get_port_info_sts.header.cmnd_type, + port_info_sts->get_port_info_sts.status); + + goto exit; + } + + spin_lock_irqsave(&hba->hba_lock, flags); + hba->active_bb_scn = port_info_sts->get_port_info_sts.bbscn; + hba->active_rx_bb_credit = + port_info_sts->get_port_info_sts.non_loop_rx_credit; + spin_unlock_irqrestore(&hba->hba_lock, flags); + + ret = RETURN_OK; +exit: + kfree(port_info_sts); + return ret; +} + +unsigned int hifc_get_port_current_info(void *v_hba, void *port_info) +{ + struct hifc_hba_s *hba = NULL; + struct 
hifc_inmbox_get_port_info_s get_port_info; + union hifc_outmbox_generic_u *port_info_sts = NULL; + struct unf_get_port_info_argout *current_port_info = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, port_info, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)v_hba; + current_port_info = (struct unf_get_port_info_argout *)port_info; + + memset(&get_port_info, 0, sizeof(get_port_info)); + port_info_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!port_info_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(port_info_sts, 0, sizeof(union hifc_outmbox_generic_u)); + + get_port_info.header.cmnd_type = HIFC_MBOX_GET_PORT_INFO; + get_port_info.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_get_port_info_s)); + get_port_info.header.port_id = hba->port_index; + + if (hifc_mb_send_and_wait_mbox(hba, &get_port_info, + sizeof(get_port_info), + port_info_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Port(0x%x) send and wait mailbox type(0x%x) failed", + hba->port_cfg.port_id, + get_port_info.header.cmnd_type); + + goto exit; + } + + if ((port_info_sts->get_port_info_sts.status != STATUS_OK) || + (port_info_sts->get_port_info_sts.header.cmnd_type != + HIFC_MBOX_GET_PORT_INFO_STS)) { + HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "Port(0x%x) receive mailbox type(0x%x) status(0x%x) error.", + hba->port_cfg.port_id, + port_info_sts->get_port_info_sts.header.cmnd_type, + port_info_sts->get_port_info_sts.status); + + goto exit; + } + + current_port_info->sfp_speed = + (unsigned char)port_info_sts->get_port_info_sts.sfp_speed; + current_port_info->present = + (unsigned char)port_info_sts->get_port_info_sts.present; + + ret = RETURN_OK; +exit: + kfree(port_info_sts); + return ret; +} + +static void hifc_get_fabric_login_params( + struct hifc_hba_s *hba, + struct unf_port_login_parms_s *v_param_addr) +{ + unsigned long flag = 0; + + spin_lock_irqsave(&hba->hba_lock, flag); + hba->active_topo = v_param_addr->en_act_topo; + hba->compared_ratov_val = v_param_addr->compared_ratov_val; + hba->compared_edtov_val = v_param_addr->compared_edtov_val; + hba->compared_bbscn = v_param_addr->compared_bbscn; + hba->remote_edtov_tag = v_param_addr->remote_edtov_tag; + hba->remote_rttov_tag = v_param_addr->remote_rttov_tag; + hba->remote_bbcredit = v_param_addr->remote_bbcredit; + spin_unlock_irqrestore(&hba->hba_lock, flag); + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) topo(0x%x) get fabric params: R_A_TOV(0x%x) E_D_TOV(%u) BB_CREDIT(0x%x) BB_SC_N(0x%x)", + hba->port_cfg.port_id, hba->active_topo, + hba->compared_ratov_val, hba->compared_edtov_val, + hba->remote_bbcredit, hba->compared_bbscn); +} + +static void hifc_get_port_login_params( + struct hifc_hba_s *hba, + struct unf_port_login_parms_s *v_param_addr) +{ + unsigned long flag = 0; + + spin_lock_irqsave(&hba->hba_lock, flag); + hba->compared_ratov_val = v_param_addr->compared_ratov_val; + hba->compared_edtov_val = v_param_addr->compared_edtov_val; + hba->compared_bbscn = v_param_addr->compared_bbscn; + hba->remote_edtov_tag = v_param_addr->remote_edtov_tag; + hba->remote_rttov_tag = v_param_addr->remote_rttov_tag; + hba->remote_bbcredit = v_param_addr->remote_bbcredit; + spin_unlock_irqrestore(&hba->hba_lock, flag); + + 
HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "Port(0x%x) Topo(0x%x) Get Port Params: R_A_TOV(0x%x), E_D_TOV(0x%x), BB_CREDIT(0x%x), BB_SC_N(0x%x).", + hba->port_cfg.port_id, hba->active_topo, + hba->compared_ratov_val, hba->compared_edtov_val, + hba->remote_bbcredit, hba->compared_bbscn); +} + +unsigned int hifc_update_fabric_param(void *v_hba, void *v_para_in) +{ + unsigned int ret = RETURN_OK; + struct hifc_hba_s *hba = v_hba; + struct unf_port_login_parms_s *login_coparms = v_para_in; + + UNF_CHECK_VALID(0x4923, UNF_B_TRUE, hba, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x4924, UNF_B_TRUE, v_para_in, return UNF_RETURN_ERROR); + + hifc_get_fabric_login_params(hba, login_coparms); + + if ((hba->active_topo == UNF_ACT_TOP_P2P_FABRIC) || + (hba->active_topo == UNF_ACT_TOP_PUBLIC_LOOP)) { + if (hba->work_mode == HIFC_SMARTIO_WORK_MODE_FC) + ret = hifc_config_login_api(hba, login_coparms); + } + + return ret; +} + +unsigned int hifc_update_port_param(void *v_hba, void *v_para_in) +{ + unsigned int ret = RETURN_OK; + struct hifc_hba_s *hba = v_hba; + struct unf_port_login_parms_s *login_coparms = + (struct unf_port_login_parms_s *)v_para_in; + + UNF_CHECK_VALID(0x4923, UNF_B_TRUE, hba, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x4924, UNF_B_TRUE, v_para_in, return UNF_RETURN_ERROR); + + if ((hba->active_topo == UNF_ACT_TOP_PRIVATE_LOOP) || + (hba->active_topo == UNF_ACT_TOP_P2P_DIRECT)) { + hifc_get_port_login_params(hba, login_coparms); + ret = hifc_config_login_api(hba, login_coparms); + } + + hifc_save_login_para_in_sq_info(hba, login_coparms); + + return ret; +} + +unsigned int hifc_clear_port_error_code(void *v_hba, void *v_err_code) +{ + return RETURN_OK; +} + +unsigned int hifc_get_and_clear_port_error_code(void *v_hba, void *v_err_code) +{ + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba; + struct hifc_inmbox_get_err_code_s get_err_code; + union hifc_outmbox_generic_u *err_code_sts = NULL; + struct unf_err_code_s *unf_err_code = + (struct unf_err_code_s *)v_err_code; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, unf_err_code, return UNF_RETURN_ERROR); + + memset(&get_err_code, 0, sizeof(get_err_code)); + + err_code_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!err_code_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(err_code_sts, 0, sizeof(union hifc_outmbox_generic_u)); + + get_err_code.header.cmnd_type = HIFC_MBOX_GET_ERR_CODE; + get_err_code.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_get_err_code_s)); + + if (hifc_mb_send_and_wait_mbox(hba, &get_err_code, sizeof(get_err_code), + err_code_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "Port(0x%x) HIFC can't send and wait mailbox, command type: 0x%x.", + hba->port_cfg.port_id, + get_err_code.header.cmnd_type); + + goto exit; + } + + if (err_code_sts->get_err_code_sts.status != STATUS_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "Port(0x%x) Receive mailbox type(0x%x) status incorrect, status: 0x%x.", + hba->port_cfg.port_id, + err_code_sts->get_err_code_sts.header.cmnd_type, + err_code_sts->get_err_code_sts.status); + + goto exit; + } + + unf_err_code->link_fail_count = + err_code_sts->get_err_code_sts.err_code[0]; + unf_err_code->loss_of_sync_count = + err_code_sts->get_err_code_sts.err_code[1]; + 
unf_err_code->loss_of_signal_count = + err_code_sts->get_err_code_sts.err_code[2]; + unf_err_code->proto_error_count = + err_code_sts->get_err_code_sts.err_code[3]; + unf_err_code->bad_rx_char_count = + err_code_sts->get_err_code_sts.err_code[4]; + unf_err_code->bad_crc_count = + err_code_sts->get_err_code_sts.err_code[5]; + unf_err_code->rx_eo_fa_count = + err_code_sts->get_err_code_sts.err_code[6]; + unf_err_code->dis_frame_count = + err_code_sts->get_err_code_sts.err_code[7]; + + ret = RETURN_OK; +exit: + kfree(err_code_sts); + return ret; +} + +unsigned int hifc_get_work_bale_bbcredit(void *v_hba, void *v_bbcredit) +{ + unsigned int *bb_credit = (unsigned int *)v_bbcredit; + struct hifc_hba_s *hba = v_hba; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_bbcredit, return UNF_RETURN_ERROR); + + if (hba->active_port_speed == UNF_PORT_SPEED_32_G) + *bb_credit = HIFC_LOWLEVEL_DEFAULT_32G_BB_CREDIT; + else if (hba->active_port_speed == UNF_PORT_SPEED_16_G) + *bb_credit = HIFC_LOWLEVEL_DEFAULT_16G_BB_CREDIT; + else + *bb_credit = HIFC_LOWLEVEL_DEFAULT_842G_BB_CREDIT; + + return RETURN_OK; +} + +unsigned int hifc_get_work_bale_bbscn(void *v_hba, void *v_bbscn) +{ + unsigned int *bbscn = (unsigned int *)v_bbscn; + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_bbscn, return UNF_RETURN_ERROR); + + *bbscn = hba->port_bbscn_cfg; + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_INFO, "Return BBSCN(0x%x) to CM", + *bbscn); + + return RETURN_OK; +} + +unsigned int hifc_get_software_version(void *v_hba, void *v_version) +{ + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba; + struct hifc_inmbox_get_fw_version_s fw_ver; + union hifc_outmbox_generic_u *fw_ver_sts = NULL; + unsigned char *ver = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, v_version, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + + memset(&fw_ver, 0, sizeof(fw_ver)); + fw_ver_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), GFP_ATOMIC); + if (!fw_ver_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(fw_ver_sts, 0, sizeof(union hifc_outmbox_generic_u)); + ver = (unsigned char *)&fw_ver_sts->get_fw_ver_sts; + + fw_ver.header.cmnd_type = HIFC_MBOX_GET_FW_VERSION; + fw_ver.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_get_fw_version_s)); + + if (hifc_mb_send_and_wait_mbox(hba, &fw_ver, sizeof(fw_ver), + fw_ver_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) can't send and wait mailbox, command type: 0x%x.", + hba->port_cfg.port_id, + fw_ver.header.cmnd_type); + + goto exit; + } + + if (fw_ver_sts->get_fw_ver_sts.header.cmnd_type != + HIFC_MBOX_GET_FW_VERSION_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "Port(0x%x) recv mailbox type(0x%x) incorrect.", + hba->port_cfg.port_id, + fw_ver_sts->get_fw_ver_sts.header.cmnd_type); + + goto exit; + } + + if (fw_ver_sts->get_fw_ver_sts.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "Port(0x%x) Receive mailbox type(0x%x) status(0x%x) incorrect.", + hba->port_cfg.port_id, + fw_ver_sts->get_fw_ver_sts.header.cmnd_type, + fw_ver_sts->get_fw_ver_sts.status); + + goto exit; + } + + memcpy(v_version, ver + HIFC_VER_ADDR_OFFSET, + sizeof(struct 
hifc_outmbox_get_fw_version_sts_s) - + HIFC_VER_ADDR_OFFSET); + + ret = RETURN_OK; +exit: + kfree(fw_ver_sts); + return ret; +} + +unsigned int hifc_get_firmware_version(void *v_fc_port, void *v_version) +{ + struct hifc_hba_s *fc_port = (struct hifc_hba_s *)v_fc_port; + struct unf_version_str_s *version = + (struct unf_version_str_s *)v_version; + char *fw_ver = NULL; + + HIFC_CHECK(INVALID_VALUE32, version, return UNF_RETURN_ERROR); + fw_ver = version->buf; + HIFC_CHECK(INVALID_VALUE32, fw_ver, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, fc_port, return UNF_RETURN_ERROR); + + fw_ver[UNF_FW_VERSION_LEN - 1] = 0; + + return RETURN_OK; +} + +unsigned int hifc_get_loop_map(void *v_hba, void *v_buf) +{ + unsigned long flags = 0; + struct unf_buf_s *buf = (struct unf_buf_s *)v_buf; + struct hifc_hba_s *hba = v_hba; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, buf, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, buf->cbuf, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, buf->buf_len, return UNF_RETURN_ERROR); + + if (buf->buf_len > UNF_LOOPMAP_COUNT) + return UNF_RETURN_ERROR; + + spin_lock_irqsave(&hba->hba_lock, flags); + if (hba->loop_map_valid != LOOP_MAP_VALID) { + spin_unlock_irqrestore(&hba->hba_lock, flags); + return UNF_RETURN_ERROR; + } + memcpy(buf->cbuf, hba->loop_map, buf->buf_len); /* do memcpy */ + spin_unlock_irqrestore(&hba->hba_lock, flags); + + return RETURN_OK; +} + +unsigned int hifc_get_speed_cfg(void *v_hba, void *v_speed_cfg) +{ + struct hifc_hba_s *hba = v_hba; + unsigned int *speed_cfg = v_speed_cfg; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_speed_cfg, return UNF_RETURN_ERROR); + + *speed_cfg = hba->port_speed_cfg; + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "Get config link rate: 0x%x.", + *speed_cfg); + + return RETURN_OK; +} + +unsigned int hifc_get_speed_act(void *v_hba, void *v_speed_act) +{ + struct hifc_hba_s *hba = v_hba; + unsigned int *speed_act = v_speed_act; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_speed_act, return UNF_RETURN_ERROR); + + *speed_act = hba->active_port_speed; + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Get config link rate: 0x%x.", + *speed_act); + return RETURN_OK; +} + +unsigned int hifc_get_port_fec(void *v_hba, void *v_para_out) +{ + struct hifc_hba_s *hba = v_hba; + int *fec = v_para_out; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, fec, return UNF_RETURN_ERROR); + + *fec = (hba->fec_status) ? 
UNF_TRUE : UNF_FALSE; + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Get Port fec: 0x%x.", + (hba->fec_status)); + return RETURN_OK; +} + +unsigned int hifc_save_hba_info(void *v_hba, void *v_para_in) +{ + struct hifc_inmbox_save_hba_info_s *hba_info = NULL; + struct hifc_outmbox_save_hba_info_sts_s *hba_info_sts = NULL; + void *hba_info_addr = v_para_in; + struct hifc_hba_s *hba = v_hba; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_para_in, return UNF_RETURN_ERROR); + + hba_info = vmalloc(sizeof(struct hifc_inmbox_save_hba_info_s)); + + if (!hba_info) + return UNF_RETURN_ERROR; + + hba_info_sts = vmalloc(sizeof(struct hifc_outmbox_save_hba_info_sts_s)); + + if (!hba_info_sts) { + vfree(hba_info); + return UNF_RETURN_ERROR; + } + + memset(hba_info, 0, sizeof(struct hifc_inmbox_save_hba_info_s)); + memset(hba_info_sts, 0, + sizeof(struct hifc_outmbox_save_hba_info_sts_s)); + + hba_info->header.cmnd_type = HIFC_MBOX_SAVE_HBA_INFO; + hba_info->header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_save_hba_info_s)); + + /* fill mailbox payload */ + memcpy(&hba_info->hba_save_info[0], hba_info_addr, SAVE_PORT_INFO_LEN); + + /* send & wait mailbox */ + if (hifc_mb_send_and_wait_mbox( + hba, hba_info, + sizeof(*hba_info), + (union hifc_outmbox_generic_u *)hba_info_sts) + != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[warn]Port(0x%x) HIFC can't send and wait mailbox, command type(0x%x)", + hba->port_cfg.port_id, + hba_info->header.cmnd_type); + + vfree(hba_info); + vfree(hba_info_sts); + + return UNF_RETURN_ERROR; + } + + /* check mailbox rsp status */ + if (hba_info_sts->status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[err]Port(0x%x) receive mailbox type(0x%x) status(0x%x) error", + hba->port_cfg.port_id, + hba_info_sts->header.cmnd_type, + hba_info_sts->status); + + vfree(hba_info); + vfree(hba_info_sts); + + return UNF_RETURN_ERROR; + } + + /* check mailbox rsp type */ + if (hba_info_sts->header.cmnd_type != HIFC_MBOX_SAVE_HBA_INFO_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[err]Port(0x%x) receive mailbox type(0x%x) error", + hba->port_cfg.port_id, + hba_info_sts->header.cmnd_type); + + vfree(hba_info); + vfree(hba_info_sts); + + return UNF_RETURN_ERROR; + } + + memcpy(hba_info_addr, &hba_info_sts->save_hba_info[0], + SAVE_PORT_INFO_LEN - 8); + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "[event]Port(0x%x) save hba info succeed", + hba->port_cfg.port_id); + + vfree(hba_info); + vfree(hba_info_sts); + + return RETURN_OK; +} + +unsigned int hifc_mbox_reset_chip(struct hifc_hba_s *v_hba, + unsigned char v_sub_type) +{ + struct hifc_inmbox_port_reset_s port_reset; + union hifc_outmbox_generic_u *port_reset_sts = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + + memset(&port_reset, 0, sizeof(port_reset)); + + port_reset_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!port_reset_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(port_reset_sts, 0, sizeof(union hifc_outmbox_generic_u)); + port_reset.header.cmnd_type = HIFC_MBOX_PORT_RESET; + port_reset.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_port_reset_s)); + port_reset.op_code = v_sub_type; + + if (hifc_mb_send_and_wait_mbox(v_hba, &port_reset, 
sizeof(port_reset), + port_reset_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[warn]Port(0x%x) can't send and wait mailbox with command type(0x%x)", + v_hba->port_cfg.port_id, + port_reset.header.cmnd_type); + + goto exit; + } + + if (port_reset_sts->port_reset_sts.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[warn]Port(0x%x) receive mailbox type(0x%x) status(0x%x) incorrect", + v_hba->port_cfg.port_id, + port_reset_sts->port_reset_sts.header.cmnd_type, + port_reset_sts->port_reset_sts.status); + + goto exit; + } + + if (port_reset_sts->port_reset_sts.header.cmnd_type != + HIFC_MBOX_PORT_RESET_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[warn]Port(0x%x) recv mailbox type(0x%x) incorrect", + v_hba->port_cfg.port_id, + port_reset_sts->port_reset_sts.header.cmnd_type); + + goto exit; + } + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "[info]Port(0x%x) reset chip mailbox success", + v_hba->port_cfg.port_id); + + ret = RETURN_OK; +exit: + kfree(port_reset_sts); + return ret; +} + +unsigned int hifc_clear_sq_wqe_done(struct hifc_hba_s *v_hba) +{ + int async_ret = RETURN_OK; + struct hifc_inmbx_clear_node_s clear_done; + + clear_done.header.cmnd_type = HIFC_MBOX_BUFFER_CLEAR_DONE; + clear_done.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbx_clear_node_s)); + clear_done.header.port_id = v_hba->port_index; + + async_ret = hifc_msg_to_mgmt_async(v_hba->hw_dev_handle, + HIFC_MOD_FC, + HIFC_MBOX_BUFFER_CLEAR_DONE, + &clear_done, sizeof(clear_done)); + + if (async_ret != 0) { + HIFC_MAILBOX_STAT(v_hba, HIFC_SEND_CLEAR_DONE_FAIL); + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]HIFC Port(0x%x) can't send clear done cmd to up, ret:%d", + v_hba->port_cfg.port_id, async_ret); + + return UNF_RETURN_ERROR; + } + + HIFC_MAILBOX_STAT(v_hba, HIFC_SEND_CLEAR_DONE); + v_hba->q_set_stage = HIFC_QUEUE_SET_STAGE_FLUSHDONE; + v_hba->next_clearing_sq = 0; + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_EVENT, UNF_KEVENT, + "[info]Port(0x%x) clear done msg(0x%x) sent to up succeed with stage(0x%x)", + v_hba->port_cfg.port_id, + clear_done.header.cmnd_type, v_hba->q_set_stage); + + return RETURN_OK; +} + +unsigned int hifc_mbx_get_fw_clear_stat(struct hifc_hba_s *v_hba, + unsigned int *v_clear_state) +{ + struct hifc_inmbox_get_clear_state_s clr_state; + union hifc_outmbox_generic_u *port_clr_state_sts = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_clear_state, return UNF_RETURN_ERROR); + + memset(&clr_state, 0, sizeof(clr_state)); + + port_clr_state_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!port_clr_state_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(port_clr_state_sts, 0, sizeof(union hifc_outmbox_generic_u)); + + clr_state.header.cmnd_type = HIFC_MBOX_GET_CLEAR_STATE; + clr_state.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_get_clear_state_s)); + + if (hifc_mb_send_and_wait_mbox(v_hba, &clr_state, sizeof(clr_state), + port_clr_state_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "hifc can't send and wait mailbox, command type: 0x%x", + clr_state.header.cmnd_type); + + goto exit; + } + + if (port_clr_state_sts->get_clr_state_sts.status != RETURN_OK) { + HIFC_TRACE( + 
UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "Port(0x%x) Receive mailbox type(0x%x) status incorrect. Status: 0x%x, state 0x%x.", + v_hba->port_cfg.port_id, + port_clr_state_sts->get_clr_state_sts.header.cmnd_type, + port_clr_state_sts->get_clr_state_sts.status, + port_clr_state_sts->get_clr_state_sts.state); + + goto exit; + } + + if (port_clr_state_sts->get_clr_state_sts.header.cmnd_type != + HIFC_MBOX_GET_CLEAR_STATE_STS) { + HIFC_TRACE( + UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "Port(0x%x) recv mailbox type(0x%x) incorrect.", + v_hba->port_cfg.port_id, + port_clr_state_sts->get_clr_state_sts.header.cmnd_type); + + goto exit; + } + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_EVENT, UNF_MAJOR, + "Port(0x%x) get port clear state 0x%x.", + v_hba->port_cfg.port_id, + port_clr_state_sts->get_clr_state_sts.state); + + *v_clear_state = port_clr_state_sts->get_clr_state_sts.state; + + ret = RETURN_OK; +exit: + kfree(port_clr_state_sts); + return ret; +} + +unsigned int hifc_mbx_set_fec(struct hifc_hba_s *v_hba, + unsigned int v_fec_opcode) +{ + struct hifc_inmbox_config_fec_s cfg_fec; + union hifc_outmbox_generic_u *port_fec_state_sts = NULL; + unsigned char op_code = 0; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + + memset(&cfg_fec, 0, sizeof(cfg_fec)); + + port_fec_state_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!port_fec_state_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(port_fec_state_sts, 0, sizeof(union hifc_outmbox_generic_u)); + + op_code = (unsigned char)v_fec_opcode; + + cfg_fec.header.cmnd_type = HIFC_MBOX_CONFIG_FEC; + cfg_fec.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(HIFC_MBOX_CONFIG_FEC)); + cfg_fec.fec_op_code = op_code; + + if (hifc_mb_send_and_wait_mbox(v_hba, &cfg_fec, sizeof(cfg_fec), + port_fec_state_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) hifc can't send and wait mailbox, command type: 0x%x", + v_hba->port_cfg.port_id, cfg_fec.header.cmnd_type); + + goto exit; + } + + if (port_fec_state_sts->config_fec_sts.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "Port(0x%x) Receive mailbox type(0x%x) status incorrect. 
Status: 0x%x.", + v_hba->port_cfg.port_id, + port_fec_state_sts->config_fec_sts.header.cmnd_type, + port_fec_state_sts->config_fec_sts.status); + + goto exit; + } + + if (port_fec_state_sts->config_fec_sts.header.cmnd_type != + HIFC_MBOX_CONFIG_FEC_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "Port(0x%x) recv mailbox type(0x%x) incorrect.", + v_hba->port_cfg.port_id, + port_fec_state_sts->config_fec_sts.header.cmnd_type); + + goto exit; + } + + v_hba->fec_status = v_fec_opcode; + + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EVENT, UNF_MAJOR, + "Port(0x%x) set FEC Status is %u.", + v_hba->port_cfg.port_id, op_code); + + ret = RETURN_OK; +exit: + kfree(port_fec_state_sts); + return ret; +} + +unsigned int hifc_notify_up_config_timer(struct hifc_hba_s *v_hba, int op_code, + unsigned int user_data) +{ + struct hifc_inmbox_config_timer_s time_cfg; + union hifc_outmbox_generic_u *time_cfg_sts = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + + memset(&time_cfg, 0, sizeof(time_cfg)); + + time_cfg_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!time_cfg_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(time_cfg_sts, 0, sizeof(union hifc_outmbox_generic_u)); + time_cfg.header.cmnd_type = HIFC_MBOX_CONFIG_TIMER; + time_cfg.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_config_timer_s)); + time_cfg.op_code = (unsigned short)op_code; + time_cfg.fun_id = hifc_global_func_id(v_hba->hw_dev_handle); + time_cfg.user_data = user_data; + + if (hifc_mb_send_and_wait_mbox(v_hba, &time_cfg, sizeof(time_cfg), + time_cfg_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[warn]Port(0x%x) hifc can't send and wait mailbox with command type(0x%x)", + v_hba->port_cfg.port_id, time_cfg.header.cmnd_type); + + goto exit; + } + + if (time_cfg_sts->timer_config_sts.header.cmnd_type != + HIFC_MBOX_CONFIG_TIMER_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[warn]Port(0x%x) recv mailbox type(0x%x) incorrect", + v_hba->port_cfg.port_id, + time_cfg_sts->timer_config_sts.header.cmnd_type); + + goto exit; + } + + if (time_cfg_sts->timer_config_sts.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[warn]Port(0x%x) Receive mailbox type(0x%x) status(0x%x) incorrect", + v_hba->port_cfg.port_id, + time_cfg_sts->timer_config_sts.header.cmnd_type, + time_cfg_sts->timer_config_sts.status); + + goto exit; + } + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_EVENT, UNF_MAJOR, + "[info]Port(0x%x) notify uP to %s timer success", + v_hba->port_cfg.port_id, op_code ? 
"open" : "close"); + + ret = RETURN_OK; +exit: + kfree(time_cfg_sts); + return ret; +} + +unsigned int hifc_get_flash_data(void *v_hba, void *v_flash_data) +{ + struct hifc_hba_s *hba = NULL; + struct unf_mbox_flash_data_mgmt_s *flash_data_mgmt = NULL; + union hifc_outmbox_generic_u *flash_data_sts = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_flash_data, return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)v_hba; + + flash_data_mgmt = kmalloc(sizeof(struct unf_mbox_flash_data_mgmt_s), + GFP_ATOMIC); + + if (!flash_data_mgmt) { + HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_REG_ATT, UNF_KEVENT, + "can't malloc buff for set flashData."); + return ret; + } + flash_data_sts = kmalloc(sizeof(struct unf_flash_data_mgmt_sts_s), + GFP_ATOMIC); + + if (!flash_data_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_REG_ATT, UNF_KEVENT, + "can't malloc buff for set flashData sts."); + kfree(flash_data_mgmt); + return ret; + } + memset(flash_data_mgmt, 0, sizeof(struct unf_mbox_flash_data_mgmt_s)); + memset(flash_data_sts, 0, sizeof(struct unf_flash_data_mgmt_sts_s)); + flash_data_mgmt->mbox_head.cmnd_type = HIFC_MBOX_FLASH_DATA_MGMT; + flash_data_mgmt->mbox_head.length = 1; /* not used */ + flash_data_mgmt->mbox_head.op_code = 0; /* read config */ + + if (hifc_mb_send_and_wait_mbox( + hba, flash_data_mgmt, + sizeof(struct unf_mbox_flash_data_mgmt_s), + flash_data_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "hifc can't send and wait mailbox, command type: 0x%x.", + flash_data_mgmt->mbox_head.cmnd_type); + + goto exit; + } + + if (flash_data_sts->flash_data_sts.mbox_head.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) mailbox status incorrect status(0x%x) .", + hba->port_cfg.port_id, + flash_data_sts->flash_data_sts.mbox_head.status); + + goto exit; + } + + if (flash_data_sts->flash_data_sts.mbox_head.cmnd_type != + HIFC_MBOX_FLASH_DATA_MGMT_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) receive mailbox type incorrect type: 0x%x.", + hba->port_cfg.port_id, + flash_data_sts->flash_data_sts.mbox_head.cmnd_type); + + goto exit; + } + + memcpy((unsigned char *)v_flash_data, + (unsigned char *)&flash_data_sts->flash_data_sts.flash_data, + sizeof(struct unf_flash_data_s)); + ret = RETURN_OK; +exit: + kfree(flash_data_mgmt); + kfree(flash_data_sts); + return ret; +} + +unsigned int hifc_set_flash_data(void *v_hba, void *v_flash_data) +{ + struct hifc_hba_s *hba = NULL; + struct unf_mbox_flash_data_mgmt_s *flash_data_mgmt = NULL; + union hifc_outmbox_generic_u *flash_data_sts = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_flash_data, return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)v_hba; + flash_data_mgmt = kmalloc(sizeof(struct unf_mbox_flash_data_mgmt_s), + GFP_ATOMIC); + + if (!flash_data_mgmt) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_KEVENT, + "can't malloc buff for set flashData."); + return ret; + } + flash_data_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + + if (!flash_data_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_KEVENT, + "can't malloc buff for set flashData sts."); + kfree(flash_data_mgmt); + return ret; + } + memset(flash_data_sts, 0, sizeof(union hifc_outmbox_generic_u)); + memset(flash_data_mgmt, 0, sizeof(struct 
unf_mbox_flash_data_mgmt_s)); + flash_data_mgmt->mbox_head.cmnd_type = HIFC_MBOX_FLASH_DATA_MGMT; + flash_data_mgmt->mbox_head.length = 1; /* not used */ + flash_data_mgmt->mbox_head.op_code = 2; /* flash config */ + + if (hifc_mb_send_and_wait_mbox( + hba, flash_data_mgmt, + sizeof(struct unf_mbox_flash_data_mgmt_s), + flash_data_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_KEVENT, + "hifc can't send and wait mailbox, command type: 0x%x.", + flash_data_sts->flash_data_sts.mbox_head.cmnd_type); + + goto END; + } + + if (flash_data_sts->flash_data_sts.mbox_head.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_KEVENT, + "Port(0x%x) mailbox status incorrect status(0x%x) .", + hba->port_cfg.port_id, + flash_data_sts->flash_data_sts.mbox_head.status); + + goto END; + } + + if (flash_data_sts->flash_data_sts.mbox_head.cmnd_type != + HIFC_MBOX_FLASH_DATA_MGMT_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_KEVENT, + "Port(0x%x) receive mailbox type incorrect type: 0x%x.", + hba->port_cfg.port_id, + flash_data_sts->flash_data_sts.mbox_head.cmnd_type); + + goto END; + } + flash_data_mgmt->mbox_head.cmnd_type = HIFC_MBOX_FLASH_DATA_MGMT; + flash_data_mgmt->mbox_head.length = 1; /* not used */ + flash_data_mgmt->mbox_head.op_code = 1; /* write config */ + memcpy(&flash_data_mgmt->flash_data, + (unsigned char *)v_flash_data, sizeof(struct unf_flash_data_s)); + + if (hifc_mb_send_and_wait_mbox( + hba, flash_data_mgmt, + sizeof(struct unf_mbox_flash_data_mgmt_s), + flash_data_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "hifc can't send and wait mailbox, command type: 0x%x.", + flash_data_sts->flash_data_sts.mbox_head.cmnd_type); + + goto END; + } + + if (flash_data_sts->flash_data_sts.mbox_head.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_KEVENT, + "Port(0x%x) mailbox status incorrect status(0x%x) .", + hba->port_cfg.port_id, + flash_data_sts->flash_data_sts.mbox_head.status); + + goto END; + } + + if (flash_data_sts->flash_data_sts.mbox_head.cmnd_type != + HIFC_MBOX_FLASH_DATA_MGMT_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_KEVENT, + "Port(0x%x) receive mailbox type incorrect type: 0x%x.", + hba->port_cfg.port_id, + flash_data_sts->flash_data_sts.mbox_head.cmnd_type); + + goto END; + } + ret = RETURN_OK; +END: + kfree(flash_data_mgmt); + kfree(flash_data_sts); + return ret; +} diff --git a/drivers/scsi/huawei/hifc/hifc_chipitf.h b/drivers/scsi/huawei/hifc/hifc_chipitf.h new file mode 100644 index 000000000000..8b4915d2a990 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_chipitf.h @@ -0,0 +1,643 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef __HIFC_CHIPITF_H__ +#define __HIFC_CHIPITF_H__ + +#include "unf_log.h" +#include "hifc_utils.h" +#include "hifc_module.h" +#include "hifc_service.h" + +/* CONF_API_CMND */ +#define HIFC_MBOX_CONFIG_API 0x00 +#define HIFC_MBOX_CONFIG_API_STS 0xA0 + +/* GET_CHIP_INFO_API_CMD */ +#define HIFC_MBOX_GET_CHIP_INFO 0x01 +#define HIFC_MBOX_GET_CHIP_INFO_STS 0xA1 + +/* PORT_RESET */ +#define HIFC_MBOX_PORT_RESET 0x02 +#define HIFC_MBOX_PORT_RESET_STS 0xA2 + +/* SFP_SWITCH_API_CMND */ +#define HIFC_MBOX_PORT_SWITCH 0x03 +#define HIFC_MBOX_PORT_SWITCH_STS 0xA3 + +/* GET_SFP_INFO */ +#define HIFC_MBOX_GET_SFP_INFO 0x04 +#define HIFC_MBOX_GET_SFP_INFO_STS 0xA4 + +/* CONF_AF_LOGIN_API_CMND */ +#define 
HIFC_MBOX_CONFIG_LOGIN_API 0x06 +#define HIFC_MBOX_CONFIG_LOGIN_API_STS 0xA6 + +/* BUFFER_CLEAR_DONE_CMND */ +#define HIFC_MBOX_BUFFER_CLEAR_DONE 0x07 +#define HIFC_MBOX_BUFFER_CLEAR_DONE_STS 0xA7 + +#define HIFC_MBOX_GET_ERR_CODE 0x08 +#define HIFC_MBOX_GET_ERR_CODE_STS 0xA8 + +#define HIFC_MBOX_GET_UP_STATE 0x09 +#define HIFC_MBOX_GET_UP_STATE_STS 0xA9 + +/* LOOPBACK MODE */ +#define HIFC_MBOX_LOOPBACK_MODE 0x0A +#define HIFC_MBOX_LOOPBACK_MODE_STS 0xAA + +/* REG RW MODE */ +#define HIFC_MBOX_REG_RW_MODE 0x0B +#define HIFC_MBOX_REG_RW_MODE_STS 0xAB + +/* GET CLEAR DONE STATE */ +#define HIFC_MBOX_GET_CLEAR_STATE 0x0E +#define HIFC_MBOX_GET_CLEAR_STATE_STS 0xAE + +/* GET UP & UCODE VER */ +#define HIFC_MBOX_GET_FW_VERSION 0x0F +#define HIFC_MBOX_GET_FW_VERSION_STS 0xAF + +/* CONFIG TIMER */ +#define HIFC_MBOX_CONFIG_TIMER 0x10 +#define HIFC_MBOX_CONFIG_TIMER_STS 0xB0 + +/* CONFIG SRQC */ +#define HIFC_MBOX_CONFIG_SRQC 0x11 +#define HIFC_MBOX_CONFIG_SRQC_STS 0xB1 + +/* Led Test */ +#define HIFC_MBOX_LED_TEST 0x12 +#define HIFC_MBOX_LED_TEST_STS 0xB2 + +/* set esch */ +#define HIFC_MBOX_SET_ESCH 0x13 +#define HIFC_MBOX_SET_ESCH_STS 0xB3 + +/* set get tx serdes */ +#define HIFC_MBOX_SET_GET_SERDES_TX 0x14 +#define HIFC_MBOX_SET_GET_SERDES_TX_STS 0xB4 + +/* get rx serdes */ +#define HIFC_MBOX_GET_SERDES_RX 0x15 +#define HIFC_MBOX_GET_SERDES_RX_STS 0xB5 + +/* i2c read write */ +#define HIFC_MBOX_I2C_WR_RD 0x16 +#define HIFC_MBOX_I2C_WR_RD_STS 0xB6 + +/* Set FEC Enable */ +#define HIFC_MBOX_CONFIG_FEC 0x17 +#define HIFC_MBOX_CONFIG_FEC_STS 0xB7 + +/* GET UCODE STATS CMD */ +#define HIFC_MBOX_GET_UCODE_STAT 0x18 +#define HIFC_MBOX_GET_UCODE_STAT_STS 0xB8 + +/* gpio read write */ +#define HIFC_MBOX_GPIO_WR_RD 0x19 +#define HIFC_MBOX_GPIO_WR_RD_STS 0xB9 + +/* GET PORT INFO CMD */ +#define HIFC_MBOX_GET_PORT_INFO 0x20 +#define HIFC_MBOX_GET_PORT_INFO_STS 0xC0 + +/* save hba info CMD */ +#define HIFC_MBOX_SAVE_HBA_INFO 0x24 +#define HIFC_MBOX_SAVE_HBA_INFO_STS 0xc4 + +#define HIFC_MBOX_FLASH_DATA_MGMT 0x25 +#define HIFC_MBOX_FLASH_DATA_MGMT_STS 0xc5 + +/* FCOE: DRV->UP */ +#define HIFC_MBOX_SEND_ELS_CMD 0x2A +#define HIFC_MBOX_SEND_VPORT_INFO 0x2B + +/* FC: UP->DRV */ +#define HIFC_MBOX_RECV_FC_LINKUP 0x40 +#define HIFC_MBOX_RECV_FC_LINKDOWN 0x41 +#define HIFC_MBOX_RECV_FC_DELCMD 0x42 +#define HIFC_MBOX_RECV_FC_ERROR 0x43 + +#define LOOP_MAP_VALID 1 +#define LOOP_MAP_INVALID 0 + +#define HIFC_MBOX_SIZE 1024 +#define HIFC_MBOX_HEADER_SIZE 4 + +#define ATUOSPEED 1 +#define FIXEDSPEED 0 +#define UNDEFINEOPCODE 0 + +#define VALUEMASK_L 0x00000000FFFFFFFF +#define VALUEMASK_H 0xFFFFFFFF00000000 + +#define STATUS_OK 0 +#define STATUS_FAIL 1 + +enum hifc_drv_2_up_unblock_msg_cmd_code_e { + HIFC_SEND_ELS_CMD, + HIFC_SEND_ELS_CMD_FAIL, + HIFC_RCV_ELS_CMD_RSP, + HIFC_SEND_CONFIG_LOGINAPI, + HIFC_SEND_CONFIG_LOGINAPI_FAIL, + HIFC_RCV_CONFIG_LOGIN_API_RSP, + HIFC_SEND_CLEAR_DONE, + HIFC_SEND_CLEAR_DONE_FAIL, + HIFC_RCV_CLEAR_DONE_RSP, + HIFC_SEND_VPORT_INFO_DONE, + HIFC_SEND_VPORT_INFO_FAIL, + HIFC_SEND_VPORT_INFO_RSP, + HIFC_MBOX_CMD_BUTT + +}; + +/* up to driver handle templete */ +struct hifc_up_2_drv_msg_handle_s { + unsigned char cmd; + unsigned int (*pfn_hifc_msg_up2drv_handler)(struct hifc_hba_s *v_hba, + void *v_buf_in); +}; + +/* Mbox Common Header */ +struct hifc_mbox_header_s { + unsigned char cmnd_type; + unsigned char length; + unsigned char port_id; + unsigned char reserved; + +}; + +/* open or close the sfp */ +struct hifc_inbox_port_switch_s { + struct hifc_mbox_header_s header; + + unsigned 
char op_code; + unsigned char port_type; + unsigned short reserved; + + unsigned char host_id; + unsigned char pf_id; + unsigned char fcoe_mode; + unsigned char reserved2; + + unsigned short conf_vlan; + unsigned short reserved3; + + unsigned long long sys_port_wwn; + unsigned long long sys_node_name; +}; + +struct hifc_outbox_port_switch_sts_s { + struct hifc_mbox_header_s header; + + unsigned short reserved; + unsigned char reserved2; + unsigned char status; +}; + +/* config API */ +struct hifc_inbox_config_api_s { + struct hifc_mbox_header_s header; + + unsigned int op_code : 8; + unsigned int reserved1 : 24; + + unsigned char topy_mode; + unsigned char sfp_speed; + unsigned char max_speed; + unsigned char hard_alpa; + + unsigned char port_name[UNF_WWN_LEN]; + + unsigned int slave : 1; + unsigned int auto_sneg : 1; + unsigned int reserved2 : 30; + + unsigned int rx_bbcredit_32g : 16; /* 160 */ + unsigned int rx_bbcredit_16g : 16; /* 80 */ + unsigned int rx_bbcredit_842g : 16; /* 50 */ + unsigned int rdy_cnt_bf_fst_frm : 16; /* 8 */ + + unsigned int esch_value_32g; + unsigned int esch_value_16g; + unsigned int esch_value_8g; + unsigned int esch_value_4g; + unsigned int esch_value_2g; + unsigned int esch_bust_size; +}; + +struct hifc_outbox_config_api_sts_s { + struct hifc_mbox_header_s header; + + unsigned short reserved; + unsigned char reserved2; + unsigned char status; +}; + +/* Get chip info */ +struct hifc_inbox_get_chip_info_s { + struct hifc_mbox_header_s header; + +}; + +struct hifc_outbox_get_chip_info_sts_s { + struct hifc_mbox_header_s header; + + unsigned char status; + unsigned char board_type; + unsigned char rvsd; + unsigned char tape_support : 1; + unsigned char reserved : 7; + + unsigned long long wwpn; + unsigned long long wwnn; + unsigned long long sys_mac; + +}; + +/* Get reg info */ +struct hifc_inmbox_get_reg_info_s { + struct hifc_mbox_header_s header; + unsigned int op_code : 1; + unsigned int reg_len : 8; + unsigned int rsvd : 23; + unsigned int reg_addr; + unsigned int reg_value_l32; + unsigned int reg_value_h32; + unsigned int rvsd[27]; +}; + +/* Get reg info sts */ +struct hifc_outmbox_get_reg_info_sts_s { + struct hifc_mbox_header_s header; + + unsigned short rvsd0; + unsigned char rvsd1; + unsigned char status; + unsigned int reg_value_l32; + unsigned int reg_value_h32; + unsigned int rvsd[28]; +}; + +/* Config login API */ +struct hifc_inmbox_config_login_s { + struct hifc_mbox_header_s header; + + unsigned int op_code : 8; + unsigned int reserved1 : 24; + + unsigned short tx_bb_credit; + unsigned short reserved2; + + unsigned int rtov; + unsigned int etov; + + unsigned int rt_tov_tag : 1; + unsigned int ed_tov_tag : 1; + unsigned int bb_credit : 6; + unsigned int bbscn : 8; + unsigned int lr_flag : 16; +}; + +struct hifc_outmbox_config_login_sts_s { + struct hifc_mbox_header_s header; + + unsigned short reserved; + unsigned char reserved2; + unsigned char status; +}; + +/* port reset */ +#define HIFC_MBOX_SUBTYPE_LIGHT_RESET 0x0 +#define HIFC_MBOX_SUBTYPE_HEAVY_RESET 0x1 + +struct hifc_inmbox_port_reset_s { + struct hifc_mbox_header_s header; + + unsigned int op_code : 8; + unsigned int reserved1 : 24; +}; + +struct hifc_outmbox_port_reset_sts_s { + struct hifc_mbox_header_s header; + + unsigned short reserved; + unsigned char reserved2; + unsigned char status; +}; + +struct hifc_inmbox_get_sfp_info_s { + struct hifc_mbox_header_s header; +}; + +struct hifc_outmbox_get_sfp_info_sts_s { + struct hifc_mbox_header_s header; + + unsigned int rcvd : 8; + 
unsigned int length : 16; + unsigned int status : 8; +}; + +/* get and clear error code */ +struct hifc_inmbox_get_err_code_s { + struct hifc_mbox_header_s header; +}; + +struct hifc_outmbox_get_err_code_sts_s { + struct hifc_mbox_header_s header; + + unsigned short rsvd; + unsigned char rsvd2; + unsigned char status; + + unsigned int err_code[8]; +}; + +/* uP-->Driver asyn event API */ +struct hifc_link_event_s { + struct hifc_mbox_header_s header; + + unsigned char link_event; + unsigned char reason; + unsigned char speed; + unsigned char top_type; + + unsigned char alpa_value; + unsigned char reserved1; + unsigned short paticpate : 1; + unsigned short acled : 1; + unsigned short yellow_speed_led : 1; + unsigned short green_speed_led : 1; + unsigned short reserved : 12; + + unsigned char loop_map_info[128]; +}; + +enum hifc_up_err_type_e { + HIFC_UP_ERR_DRV_PARA = 0, + HIFC_UP_ERR_SFP = 1, + HIFC_UP_ERR_32G_PUB = 2, + HIFC_UP_ERR_32G_UA = 3, + HIFC_UP_ERR_32G_MAC = 4, + HIFC_UP_ERR_NON32G_DFX = 5, + HIFC_UP_ERR_NON32G_MAC = 6, + HIFC_UP_ERR_BUTT +}; + +enum hifc_up_err_value_e { + /* ERR type 0 */ + HIFC_DRV_2_UP_PARA_ERR = 0, + + /* ERR type 1 */ + HIFC_SFP_SPEED_ERR, + + /* ERR type 2 */ + HIFC_32GPUB_UA_RXESCH_FIFO_OF, + HIFC_32GPUB_UA_RXESCH_FIFO_UCERR, + + /* ERR type 3 */ + HIFC_32G_UA_UATX_LEN_ABN, + HIFC_32G_UA_RXAFIFO_OF, + HIFC_32G_UA_TXAFIFO_OF, + HIFC_32G_UA_RXAFIFO_UCERR, + HIFC_32G_UA_TXAFIFO_UCERR, + + /* ERR type 4 */ + HIFC_32G_MAC_RX_BBC_FATAL, + HIFC_32G_MAC_TX_BBC_FATAL, + HIFC_32G_MAC_TXFIFO_UF, + HIFC_32G_MAC_PCS_TXFIFO_UF, + HIFC_32G_MAC_RXBBC_CRDT_TO, + HIFC_32G_MAC_PCS_RXAFIFO_OF, + HIFC_32G_MAC_PCS_TXFIFO_OF, + HIFC_32G_MAC_FC2P_RXFIFO_OF, + HIFC_32G_MAC_FC2P_TXFIFO_OF, + HIFC_32G_MAC_FC2P_CAFIFO_OF, + HIFC_32G_MAC_PCS_RXRSFECM_UCEER, + HIFC_32G_MAC_PCS_RXAFIFO_UCEER, + HIFC_32G_MAC_PCS_TXFIFO_UCEER, + HIFC_32G_MAC_FC2P_RXFIFO_UCEER, + HIFC_32G_MAC_FC2P_TXFIFO_UCEER, + + /* ERR type 5 */ + HIFC_NON32G_DFX_FC1_DFX_BF_FIFO, + HIFC_NON32G_DFX_FC1_DFX_BP_FIFO, + HIFC_NON32G_DFX_FC1_DFX_RX_AFIFO_ERR, + HIFC_NON32G_DFX_FC1_DFX_TX_AFIFO_ERR, + HIFC_NON32G_DFX_FC1_DFX_DIRQ_RXBUF_FIFO1, + HIFC_NON32G_DFX_FC1_DFX_DIRQ_RXBBC_TO, + HIFC_NON32G_DFX_FC1_DFX_DIRQ_TXDAT_FIFO, + HIFC_NON32G_DFX_FC1_DFX_DIRQ_TXCMD_FIFO, + HIFC_NON32G_DFX_FC1_ERR_R_RDY, + + /* ERR type 6 */ + HIFC_NON32G_MAC_FC1_FAIRNESS_ERROR, + + HIFC_ERR_VALUE_BUTT +}; + +struct hifc_up_error_event_s { + struct hifc_mbox_header_s header; + + unsigned char link_event; + unsigned char error_level; + unsigned char error_type; + unsigned char error_value; +}; + +struct hifc_inmbx_clear_node_s { + struct hifc_mbox_header_s header; +}; + +struct hifc_inmbox_get_clear_state_s { + struct hifc_mbox_header_s header; + unsigned int resvd[31]; +}; + +struct hifc_outmbox_get_clear_state_sts_s { + struct hifc_mbox_header_s header; + unsigned short rsvd; + unsigned char state; /* 1--clear doing. 0---clear done. 
*/ + unsigned char status; /* 0--ok,!0---fail */ + unsigned int resvd[30]; +}; + +#define HIFC_FIP_MODE_VN2VF 0 +#define HIFC_FIP_MODE_VN2VN 1 + +/* get port state */ +struct hifc_inmbox_get_port_info_s { + struct hifc_mbox_header_s header; +}; + +/* save hba info */ +struct hifc_inmbox_save_hba_info_s { + struct hifc_mbox_header_s header; + + unsigned int hba_save_info[254]; + +}; + +struct hifc_outmbox_get_port_info_sts_s { + struct hifc_mbox_header_s header; + + unsigned int status : 8; + unsigned int fec_vis_tts_16g : 8; + unsigned int bbscn : 8; + unsigned int loop_credit : 8; + + unsigned int non_loop_rx_credit : 8; + unsigned int non_loop_tx_credit : 8; + unsigned int sfp_speed : 8; + unsigned int present : 8; + +}; + +struct hifc_outmbox_save_hba_info_sts_s { + struct hifc_mbox_header_s header; + unsigned short rsvd1; + unsigned char rsvd2; + unsigned char status; + unsigned int rsvd3; + unsigned int save_hba_info[252]; +}; + +#define HIFC_VER_ADDR_OFFSET (8) +struct hifc_inmbox_get_fw_version_s { + struct hifc_mbox_header_s header; +}; + +struct hifc_outmbox_get_fw_version_sts_s { + struct hifc_mbox_header_s header; + + unsigned char status; + unsigned char rsv[3]; + + unsigned char ucode_ver[HIFC_VER_LEN]; + unsigned char ucode_compile_time[HIFC_COMPILE_TIME_LEN]; + + unsigned char up_ver[HIFC_VER_LEN]; + unsigned char up_compile_time[HIFC_COMPILE_TIME_LEN]; + + unsigned char boot_ver[HIFC_VER_LEN]; + unsigned char boot_compile_time[HIFC_COMPILE_TIME_LEN]; +}; + +/* Set Fec Enable */ +struct hifc_inmbox_config_fec_s { + struct hifc_mbox_header_s header; + + unsigned char fec_op_code; + unsigned char rsv0; + unsigned short rsv1; +}; + +struct hifc_outmbox_config_fec_sts_s { + struct hifc_mbox_header_s header; + + unsigned short usrsv0; + unsigned char ucrsv1; + unsigned char status; +}; + +struct hifc_inmbox_config_timer_s { + struct hifc_mbox_header_s header; + + unsigned short op_code; + unsigned short fun_id; + unsigned int user_data; +}; + +struct hifc_outmbox_config_timer_sts_s { + struct hifc_mbox_header_s header; + + unsigned char status; + unsigned char rsv[3]; +}; + +union hifc_outmbox_generic_u { + struct { + struct hifc_mbox_header_s header; + unsigned int rsvd[(HIFC_MBOX_SIZE - HIFC_MBOX_HEADER_SIZE) / + sizeof(unsigned int)]; + } generic; + + struct hifc_outbox_port_switch_sts_s port_switch_sts; + struct hifc_outbox_config_api_sts_s config_api_sts; + struct hifc_outbox_get_chip_info_sts_s get_chip_info_sts; + struct hifc_outmbox_get_reg_info_sts_s get_reg_info_sts; + struct hifc_outmbox_config_login_sts_s config_login_sts; + struct hifc_outmbox_port_reset_sts_s port_reset_sts; + struct hifc_outmbox_get_sfp_info_sts_s get_sfp_info_sts; + struct hifc_outmbox_get_err_code_sts_s get_err_code_sts; + struct hifc_outmbox_get_clear_state_sts_s get_clr_state_sts; + struct hifc_outmbox_get_fw_version_sts_s get_fw_ver_sts; + struct hifc_outmbox_config_fec_sts_s config_fec_sts; + struct hifc_outmbox_config_timer_sts_s timer_config_sts; + struct hifc_outmbox_get_port_info_sts_s get_port_info_sts; + struct unf_flash_data_mgmt_sts_s flash_data_sts; +}; + +unsigned int hifc_get_chip_msg(void *v_hba, void *v_mac); +unsigned int hifc_config_port_table(struct hifc_hba_s *v_hba); +unsigned int hifc_port_switch(struct hifc_hba_s *v_hba, int turn_on); +unsigned int hifc_get_speed_act(void *v_hba, void *v_speed_act); +unsigned int hifc_get_speed_cfg(void *v_hba, void *v_speed_cfg); +unsigned int hifc_get_loop_map(void *v_hba, void *v_buf); +unsigned int hifc_get_firmware_version(void 
*v_fc_port, void *v_ver); +unsigned int hifc_get_work_bale_bbcredit(void *v_hba, void *v_bb_credit); +unsigned int hifc_get_work_bale_bbscn(void *v_hba, void *v_bbscn); +unsigned int hifc_get_and_clear_port_error_code(void *v_hba, void *v_err_code); +unsigned int hifc_get_port_current_info(void *v_hba, void *v_port_info); +unsigned int hifc_get_port_fec(void *v_hba, void *v_para_out); +unsigned int hifc_get_software_version(void *v_fc_port, void *v_ver); +unsigned int hifc_get_port_info(void *v_hba); +unsigned int hifc_rw_reg(void *v_hba, void *v_params); +unsigned int hifc_clear_port_error_code(void *v_hba, void *v_err_code); +unsigned int hifc_get_sfp_info(void *v_fc_port, void *v_sfp_info); +unsigned int hifc_get_hardware_version(void *v_fc_port, void *v_ver); +unsigned int hifc_get_lport_led(void *v_hba, void *v_led_state); +unsigned int hifc_get_loop_alpa(void *v_hba, void *v_alpa); +unsigned int hifc_get_topo_act(void *v_hba, void *v_topo_act); +unsigned int hifc_get_topo_cfg(void *v_hba, void *v_topo_cfg); +unsigned int hifc_config_login_api( + struct hifc_hba_s *v_hba, + struct unf_port_login_parms_s *v_login_parms); +unsigned int hifc_mb_send_and_wait_mbox(struct hifc_hba_s *v_hba, + const void *v_in_mbox, + unsigned short in_size, + union hifc_outmbox_generic_u + *v_out_mbox); +void hifc_up_msg_2_driver_proc(void *v_hwdev_handle, + void *v_pri_handle, + unsigned char v_cmd, + void *v_buf_in, + unsigned short v_in_size, + void *v_buf_out, + unsigned short *v_out_size); + +unsigned int hifc_mbox_reset_chip(struct hifc_hba_s *v_hba, + unsigned char v_sub_type); +unsigned int hifc_clear_sq_wqe_done(struct hifc_hba_s *v_hba); +unsigned int hifc_update_fabric_param(void *v_hba, void *v_para_in); +unsigned int hifc_update_port_param(void *v_hba, void *v_para_in); +unsigned int hifc_mbx_get_fw_clear_stat(struct hifc_hba_s *v_hba, + unsigned int *v_clear_state); +unsigned short hifc_get_global_base_qpn(void *v_handle); +unsigned int hifc_mbx_set_fec(struct hifc_hba_s *v_hba, + unsigned int v_fec_opcode); +unsigned int hifc_notify_up_config_timer(struct hifc_hba_s *v_hba, + int v_opcode, + unsigned int v_user_data); +unsigned int hifc_save_hba_info(void *v_hba, void *v_para_in); +unsigned int hifc_get_chip_capability(void *hw_dev_handle, + struct hifc_chip_info_s *v_chip_info); +unsigned int hifc_get_flash_data(void *v_hba, void *v_flash_data); +unsigned int hifc_set_flash_data(void *v_hba, void *v_flash_data); + +#endif diff --git a/drivers/scsi/huawei/hifc/hifc_hba.c b/drivers/scsi/huawei/hifc/hifc_hba.c new file mode 100644 index 000000000000..bab735916b8e --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_hba.c @@ -0,0 +1,1627 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#include "hifc_module.h" +#include "hifc_chipitf.h" +#include "hifc_io.h" +#include "hifc_portmng.h" +#include "hifc_lld.h" +#include "hifc_cqm_object.h" +#include "hifc_cqm_main.h" +#include "hifc_mgmt.h" +#include "hifc_hba.h" + +struct hifc_hba_s *hifc_hba[HIFC_HBA_PORT_MAX_NUM]; +unsigned long probe_bit_map[HIFC_MAX_PROBE_PORT_NUM / HIFC_PORT_NUM_PER_TABLE]; +static unsigned long card_num_bit_map[HIFC_MAX_PROBE_PORT_NUM / + HIFC_PORT_NUM_PER_TABLE]; +static struct hifc_card_num_manage_s card_num_manage[HIFC_MAX_CARD_NUM]; +/* probe global lock */ +spinlock_t probe_spin_lock; +unsigned int max_parent_qpc_num; + +static unsigned int hifc_port_config_set(void *v_hba, + enum unf_port_config_set_op_e op_code, + void 
*v_var_in); +static unsigned int hifc_port_config_get(void *v_hba, + enum unf_port_config_get_op_e op_code, + void *param_out); +static unsigned int hifc_sfp_switch(void *v_hba, void *v_para_in); +static unsigned int hifc_get_hba_pcie_link_state(void *v_hba, + void *v_link_state); + +struct service_register_template_s service_cqm_temp = { + .scq_ctx_size = HIFC_SCQ_CNTX_SIZE, + /* srq, scq context_size configuration */ + .srq_ctx_size = HIFC_SRQ_CNTX_SIZE, + /* the API of asynchronous event from TILE to driver */ + .aeq_callback = hifc_process_aeqe, +}; + +/* default configuration: auto speed, auto topology, INI+TGT */ +static struct unf_cfg_item_s hifc_port_cfg_parm[] = { + { "port_id", 0, 0x110000, 0xffffff}, + /* port mode:INI(0x20), TGT(0x10), BOTH(0x30) */ + { "port_mode", 0, 0x20, 0xff}, + /* port topology, 0x3: loop, 0xc:p2p, 0xf:auto ,0x10:vn2vn */ + { "port_topology", 0, 0xf, 0x20}, + /* alpa address of port */ + { "port_alpa", 0, 0xdead, 0xffff}, + /* queue depth of originator registered to SCSI midlayer */ + { "max_queue_depth", 0, 512, 512}, + { "sest_num", 0, 4096, 4096}, + { "max_login", 0, 2048, 2048}, + /* nodename from 32 bit to 64 bit */ + { "node_name_high", 0, 0x1000286e, 0xffffffff}, + /* nodename from 0 bit to 31 bit */ + { "node_name_low", 0, 0xd4bbf12f, 0xffffffff}, + /* portname from 32 bit to 64 bit */ + { "port_name_high", 0, 0x2000286e, 0xffffffff}, + /* portname from 0 bit to 31 bit */ + { "port_name_low", 0, 0xd4bbf12f, 0xffffffff}, + /* port speed 0:auto 1:1Gbps 2:2Gbps 3:4Gbps 4:8Gbps 5:16Gbps */ + { "port_speed", 0, 0, 32}, + /* unit: us */ + { "interrupt_delay", 0, 0, 100}, + { "tape_support", 0, 0, 1}, /* tape support */ + { "End", 0, 0, 0} +}; + +struct unf_low_level_function_op_s hifc_fun_op = { + .low_level_type = UNF_HIFC_FC, + .name = "HIFC", + /* XID allocated from CM level */ + .xchg_mgr_type = UNF_LOW_LEVEL_MGR_TYPE_PASSTIVE, + .abts_xchg = UNF_NO_EXTRA_ABTS_XCHG, + .pass_through_flag = UNF_LOW_LEVEL_PASS_THROUGH_PORT_LOGIN, + .support_max_npiv_num = UNF_HIFC_MAXNPIV_NUM, + .chip_id = 0, + .support_max_speed = UNF_PORT_SPEED_32_G, + .support_max_rport = UNF_HIFC_MAXRPORT_NUM, + .sfp_type = UNF_PORT_TYPE_FC_SFP, + .rport_release_type = UNF_LOW_LEVEL_RELEASE_RPORT_ASYNC, + .sirt_page_mode = UNF_LOW_LEVEL_SIRT_PAGE_MODE_XCHG, + + /* Link service */ + .service_op = { + .pfn_unf_els_send = hifc_send_els_cmnd, + .pfn_unf_bls_send = hifc_send_bls_cmnd, + .pfn_unf_gs_send = hifc_send_gs_cmnd, + .pfn_unf_cmnd_send = hifc_send_scsi_cmnd, + .pfn_unf_release_rport_res = hifc_free_parent_resource, + .pfn_unf_flush_ini_resp_que = hifc_flush_ini_resp_queue, + .pfn_unf_alloc_rport_res = hifc_alloc_parent_resource, + .pfn_unf_rport_session_rst = hifc_rport_session_rst, + }, + + /* Port Mgr */ + .port_mgr_op = { + .pfn_ll_port_config_set = hifc_port_config_set, + .pfn_ll_port_config_get = hifc_port_config_get, + .pfn_ll_port_diagnose = hifc_port_diagnose, + } +}; + +struct hifc_port_config_op_s { + enum unf_port_config_set_op_e op_code; + unsigned int (*pfn_hifc_operation)(void *v_hba, void *v_para_in); +}; + +struct hifc_port_config_op_s hifc_config_set_op[] = { + { UNF_PORT_CFG_SET_SPEED, hifc_set_port_speed }, + { UNF_PORT_CFG_SET_TOPO, hifc_set_port_topo }, + { UNF_PORT_CFG_SET_BBSCN, hifc_set_port_bbscn }, + { UNF_PORT_CFG_SET_SFP_SWITCH, hifc_sfp_switch }, + { UNF_PORT_CFG_SET_PORT_SWITCH, hifc_sfp_switch }, + { UNF_PORT_CFG_SET_PORT_STATE, hifc_set_port_state }, + { UNF_PORT_CFG_UPDATE_WWN, NULL }, + { UNF_PORT_CFG_SET_FCP_CONF, hifc_set_port_fcp_conf }, + { 
UNF_PORT_CFG_SET_LOOP_ROLE, hifc_set_loop_role }, + { UNF_PORT_CFG_SET_MAX_SUPPORT_SPEED, hifc_set_max_support_speed }, + { UNF_PORT_CFG_UPDATE_FABRIC_PARAM, hifc_update_fabric_param }, + { UNF_PORT_CFG_UPDATE_PLOGI_PARAM, hifc_update_port_param }, + { UNF_PORT_CFG_UPDATE_FDISC_PARAM, NULL }, + { UNF_PORT_CFG_SAVE_HBA_INFO, hifc_save_hba_info }, + { UNF_PORT_CFG_SET_HBA_BASE_INFO, hifc_set_hba_base_info }, + { UNF_PORT_CFG_SET_FLASH_DATA_INFO, hifc_set_flash_data }, + { UNF_PORT_CFG_SET_BUTT, NULL } +}; + +struct hifc_port_cfg_get_op_s { + enum unf_port_config_get_op_e op_code; + unsigned int (*pfn_hifc_operation)(void *v_hba, void *param_out); +}; + +struct hifc_port_cfg_get_op_s hifc_config_get_op[] = { + { UNF_PORT_CFG_GET_SPEED_CFG, hifc_get_speed_cfg }, + { UNF_PORT_CFG_GET_SPEED_ACT, hifc_get_speed_act }, + { UNF_PORT_CFG_GET_TOPO_CFG, hifc_get_topo_cfg }, + { UNF_PORT_CFG_GET_TOPO_ACT, hifc_get_topo_act }, + { UNF_PORT_CFG_GET_LOOP_MAP, hifc_get_loop_map }, + { UNF_PORT_CFG_GET_SFP_PRESENT, NULL }, + { UNF_PORT_CFG_GET_SFP_INFO, hifc_get_sfp_info }, + { UNF_PORT_CFG_GET_FW_VER, hifc_get_firmware_version }, + { UNF_PORT_CFG_GET_HW_VER, hifc_get_hardware_version }, + { UNF_PORT_CFG_GET_WORKBALE_BBCREDIT, hifc_get_work_bale_bbcredit }, + { UNF_PORT_CFG_GET_WORKBALE_BBSCN, hifc_get_work_bale_bbscn }, + { UNF_PORT_CFG_GET_LOOP_ALPA, hifc_get_loop_alpa }, + { UNF_PORT_CFG_GET_MAC_ADDR, hifc_get_chip_msg }, + { UNF_PORT_CFG_CLR_LESB, hifc_clear_port_error_code }, + { UNF_PORT_CFG_GET_LESB_THEN_CLR, hifc_get_and_clear_port_error_code}, + { UNF_PORT_CFG_GET_PORT_INFO, hifc_get_port_current_info }, + { UNF_PORT_CFG_GET_LED_STATE, hifc_get_lport_led }, + { UNF_PORT_CFG_GET_FEC, hifc_get_port_fec }, + { UNF_PORT_CFG_GET_PCIE_LINK_STATE, hifc_get_hba_pcie_link_state }, + { UNF_PORT_CFG_GET_FLASH_DATA_INFO, hifc_get_flash_data }, + { UNF_PORT_CFG_GET_BUTT, NULL } +}; + +static unsigned int hifc_port_config_set(void *v_phba, + enum unf_port_config_set_op_e op_code, + void *v_var_in) +{ + unsigned int op_idx = 0; + + HIFC_CHECK(INVALID_VALUE32, v_phba, return UNF_RETURN_ERROR); + + for (op_idx = 0; + op_idx < sizeof(hifc_config_set_op) / + sizeof(struct hifc_port_config_op_s); + op_idx++) { + if (op_code == hifc_config_set_op[op_idx].op_code) { + if (!hifc_config_set_op[op_idx].pfn_hifc_operation) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Null operation for configuration, opcode(0x%x), operation ID(0x%x)", + op_code, op_idx); + return UNF_RETURN_ERROR; + } else { + return hifc_config_set_op[op_idx].pfn_hifc_operation(v_phba, v_var_in); + } + } + } + + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]No operation code for configuration, opcode(0x%x)", + op_code); + + return UNF_RETURN_ERROR; +} + +static unsigned int hifc_port_config_get(void *v_phba, + enum unf_port_config_get_op_e op_code, + void *v_para_out) +{ + unsigned int op_idx = 0; + + HIFC_CHECK(INVALID_VALUE32, v_phba, return UNF_RETURN_ERROR); + + for (op_idx = 0; + op_idx < sizeof(hifc_config_get_op) / + sizeof(struct hifc_port_cfg_get_op_s); + op_idx++) { + if (op_code == hifc_config_get_op[op_idx].op_code) { + if (!hifc_config_get_op[op_idx].pfn_hifc_operation) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Null operation to get configuration, opcode(0x%x), operation ID(0x%x)", + op_code, op_idx); + return UNF_RETURN_ERROR; + } else { + return hifc_config_get_op[op_idx].pfn_hifc_operation(v_phba, v_para_out); + } + } + } + + 
HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]No operation to get configuration, opcode(0x%x)", + op_code); + + return UNF_RETURN_ERROR; +} + +static unsigned int hifc_check_port_cfg( + const struct hifc_port_cfg_s *v_port_cfg) +{ + int topo_condition, speed_condition; + /* About Work Topology */ + topo_condition = ((v_port_cfg->port_topology != UNF_TOP_LOOP_MASK) && + (v_port_cfg->port_topology != UNF_TOP_P2P_MASK) && + (v_port_cfg->port_topology != UNF_TOP_AUTO_MASK)); + if (topo_condition) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Configured port topology(0x%x) is incorrect", + v_port_cfg->port_topology); + return UNF_RETURN_ERROR; + } + + /* About Work Mode */ + if (v_port_cfg->port_mode != UNF_PORT_MODE_INI) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Configured port mode(0x%x) is incorrect", + v_port_cfg->port_mode); + + return UNF_RETURN_ERROR; + } + + /* About Work Speed */ + speed_condition = ((v_port_cfg->port_speed != UNF_PORT_SPEED_AUTO) && + (v_port_cfg->port_speed != UNF_PORT_SPEED_2_G) && + (v_port_cfg->port_speed != UNF_PORT_SPEED_4_G) && + (v_port_cfg->port_speed != UNF_PORT_SPEED_8_G) && + (v_port_cfg->port_speed != UNF_PORT_SPEED_16_G) && + (v_port_cfg->port_speed != UNF_PORT_SPEED_32_G)); + if (speed_condition) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Configured port speed(0x%x) is incorrect", + v_port_cfg->port_speed); + return UNF_RETURN_ERROR; + } + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Check port configuration OK"); + + return RETURN_OK; +} + +static unsigned int hifc_get_port_cfg(struct hifc_hba_s *v_hba, + struct hifc_chip_info_s *v_chip_info, + unsigned char v_card_num) +{ +#define UNF_CONFIG_ITEM_LEN 15 + + /* + * Maximum length of a configuration item name, including the end + * character + */ +#define UNF_MAX_ITEM_NAME_LEN (32 + 1) + + /* Get and check parameters */ + char cfg_item[UNF_MAX_ITEM_NAME_LEN]; + unsigned int ret = UNF_RETURN_ERROR; + struct hifc_hba_s *hba = v_hba; + int iret = RETURN_ERROR_S32; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + memset((void *)cfg_item, 0, sizeof(cfg_item)); + + hba->card_info.func_num = + (hifc_global_func_id(v_hba->hw_dev_handle)) & UNF_FUN_ID_MASK; + hba->card_info.card_num = v_card_num; + + /* The range of PF of FC server is from PF1 to PF2 */ + iret = snprintf(cfg_item, UNF_CONFIG_ITEM_LEN, "hifc_cfg_%1u", + (hba->card_info.func_num)); + UNF_FUNCTION_RETURN_CHECK(iret, UNF_CONFIG_ITEM_LEN); + cfg_item[UNF_MAX_ITEM_NAME_LEN - 1] = 0; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Get port configuration: %s", cfg_item); + + /* Get configuration parameters from file */ + UNF_LOWLEVEL_GET_CFG_PARMS(ret, cfg_item, &hifc_port_cfg_parm[0], + (unsigned int *)(void *)&hba->port_cfg, + sizeof(hifc_port_cfg_parm) / + sizeof(struct unf_cfg_item_s)); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) can't get configuration", + hba->port_cfg.port_id); + + return ret; + } + + if (max_parent_qpc_num <= 2048) { + hba->port_cfg.sest_num = 2048; + hba->port_cfg.max_login = 2048; + } + + hba->port_cfg.port_id &= 0xff0000; + hba->port_cfg.port_id |= hba->card_info.card_num << 8; + hba->port_cfg.port_id |= hba->card_info.func_num; + hba->port_cfg.tape_support = (unsigned int)v_chip_info->tape_support; + + /* Parameters check */ + ret = hifc_check_port_cfg(&hba->port_cfg); + if (ret != 
RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) check configuration incorrect", + hba->port_cfg.port_id); + + return ret; + } + + /* Set configuration which is got from file */ + hba->port_speed_cfg = hba->port_cfg.port_speed; + hba->port_topo_cfg = hba->port_cfg.port_topology; + + return ret; +} + +void hifc_flush_root_ctx(struct hifc_hba_s *v_hba) +{ + int ret = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return); + + ret = hifc_func_rx_tx_flush(v_hba->hw_dev_handle); + if (ret) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]chipif_func_rx_tx_flush failed with return value(0x%x)", + ret); + } +} + +static unsigned int hifc_delete_srqc_via_cmdq_sync(struct hifc_hba_s *v_hba, + unsigned long long sqrc_gpa) +{ + /* Via CMND Queue */ +#define HIFC_DEL_SRQC_TIMEOUT 3000 + + int ret; + struct hifcoe_cmdqe_delete_srqc_s del_srqc_cmd; + struct hifc_cmd_buf *cmdq_in_buf; + + /* Alloc Cmnd buffer */ + cmdq_in_buf = hifc_alloc_cmd_buf(v_hba->hw_dev_handle); + if (!cmdq_in_buf) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]cmdq in_cmd_buf allocate failed"); + + HIFC_ERR_IO_STAT(v_hba, HIFCOE_TASK_T_DEL_SRQC); + return UNF_RETURN_ERROR; + } + + /* Build & Send Cmnd */ + memset(&del_srqc_cmd, 0, sizeof(del_srqc_cmd)); + del_srqc_cmd.wd0.task_type = HIFCOE_TASK_T_DEL_SRQC; + del_srqc_cmd.srqc_gpa_h = HIFC_HIGH_32_BITS(sqrc_gpa); + del_srqc_cmd.srqc_gpa_l = HIFC_LOW_32_BITS(sqrc_gpa); + hifc_cpu_to_big32(&del_srqc_cmd, sizeof(del_srqc_cmd)); + memcpy(cmdq_in_buf->buf, &del_srqc_cmd, sizeof(del_srqc_cmd)); + cmdq_in_buf->size = sizeof(del_srqc_cmd); + + ret = hifc_cmdq_detail_resp(v_hba->hw_dev_handle, HIFC_ACK_TYPE_CMDQ, + HIFC_MOD_FCOE, 0, + cmdq_in_buf, NULL, HIFC_DEL_SRQC_TIMEOUT); + + /* Free Cmnd Buffer */ + hifc_free_cmd_buf(v_hba->hw_dev_handle, cmdq_in_buf); + + if (ret) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Send del srqc via cmdq failed, ret=0x%x", ret); + + HIFC_ERR_IO_STAT(v_hba, HIFCOE_TASK_T_DEL_SRQC); + return UNF_RETURN_ERROR; + } + + HIFC_IO_STAT(v_hba, HIFCOE_TASK_T_DEL_SRQC); + + return RETURN_OK; +} + +void hifc_flush_srq_ctx(struct hifc_hba_s *v_hba) +{ + struct hifc_srq_info_s *srq_info = NULL; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Start destroy ELS SRQC"); + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return); + + /* Check state to avoid to flush SRQC again */ + srq_info = &v_hba->els_srq_info; + if (srq_info->srq_type == HIFC_SRQ_ELS && + srq_info->enable == UNF_TRUE) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[event]HBA(0x%x) flush ELS SRQC", + v_hba->port_index); + + (void)hifc_delete_srqc_via_cmdq_sync( + v_hba, + srq_info->cqm_srq_info->q_ctx_paddr); + } +} + +static unsigned int hifc_create_queues(struct hifc_hba_s *v_hba) +{ + unsigned int ret = UNF_RETURN_ERROR; + + ret = hifc_create_root_queues(v_hba); + if (ret != RETURN_OK) + goto out_creat_root_queue_fail; + + /* Initialize shared resources of SCQ and SRQ in parent queue */ + ret = hifc_create_common_share_queues(v_hba); + if (ret != RETURN_OK) + goto out_create_common_queue_fail; + + /* Initialize parent queue manager resources */ + ret = hifc_alloc_parent_queue_mgr(v_hba); + if (ret != RETURN_OK) + goto out_free_share_queue_resource; + + /* Initialize shared WQE page pool in parent SQ */ + ret = hifc_alloc_parent_sq_wqe_page_pool(v_hba); + if (ret != RETURN_OK) + goto out_free_parent_queue_resource; + + /* + 
* Notice: the configuration of SQ and QID(default_sq_id) + * must be the same in FC + */ + v_hba->next_clearing_sq = 0; + v_hba->default_sq_id = HIFC_QID_SQ; + + return RETURN_OK; + +out_free_parent_queue_resource: + hifc_free_parent_queue_mgr(v_hba); + +out_free_share_queue_resource: + hifc_flush_scq_ctx(v_hba); + hifc_flush_srq_ctx(v_hba); + hifc_destroy_common_share_queues(v_hba); + +out_create_common_queue_fail: + hifc_destroy_root_queues(v_hba); + +out_creat_root_queue_fail: + hifc_flush_root_ctx(v_hba); + + return ret; +} + +static void hifc_destroy_queues(struct hifc_hba_s *v_hba) +{ + /* Free parent queue resource */ + hifc_free_parent_queues(v_hba); + + /* Free queue manager resource */ + hifc_free_parent_queue_mgr(v_hba); + + /* Free linked List SQ and WQE page pool resource */ + hifc_free_parent_sq_wqe_page_pool(v_hba); + + /* Free shared SRQ and SCQ queue resource */ + hifc_destroy_common_share_queues(v_hba); + + /* Free root queue resource */ + hifc_destroy_root_queues(v_hba); +} + +static unsigned int hifc_notify_up_open_timer(struct hifc_hba_s *v_hba) +{ + int op_code = UNF_TRUE; + unsigned int cmd_scq_bit_map = 0; + unsigned int scq_index = 0; + unsigned int ret; + + for (scq_index = 0; scq_index < HIFC_TOTAL_SCQ_NUM; scq_index++) + cmd_scq_bit_map |= HIFC_SCQ_IS_CMD(scq_index) ? + (1 << scq_index) : (0 << scq_index); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) open timer, cmdscq bitmap:0x%x", + v_hba->port_cfg.port_id, cmd_scq_bit_map); + + ret = hifc_notify_up_config_timer(v_hba, op_code, cmd_scq_bit_map); + + return ret; +} + +static unsigned int hifc_notify_up_close_timer(struct hifc_hba_s *v_hba) +{ + int op_code = UNF_FALSE; + unsigned int cmd_scq_bit_map = 0; + unsigned int scq_index = 0; + unsigned int ret; + + for (scq_index = 0; scq_index < HIFC_TOTAL_SCQ_NUM; scq_index++) + cmd_scq_bit_map |= HIFC_SCQ_IS_CMD(scq_index) ? + (1 << scq_index) : (0 << scq_index); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) close timer with cmd_scq bitmap(0x%x)", + v_hba->port_cfg.port_id, cmd_scq_bit_map); + + ret = hifc_notify_up_config_timer(v_hba, op_code, cmd_scq_bit_map); + + return ret; +} + +static unsigned int hifc_initial_chip_access(struct hifc_hba_s *v_hba) +{ + int ret = RETURN_OK; + + /* 1. + * Initialize cqm access related with scq, emb cq, aeq(ucode-->driver) + */ + service_cqm_temp.service_handle = v_hba; + ret = cqm_service_register(v_hba->hw_dev_handle, &service_cqm_temp); + if (ret != CQM_SUCCESS) + return UNF_RETURN_ERROR; + + /* 2. 
Initialize mailbox(driver-->up), aeq(up--->driver) access */ + ret = hifc_register_mgmt_msg_cb(v_hba->hw_dev_handle, + HIFC_MOD_FC, v_hba, + hifc_up_msg_2_driver_proc); + if (ret != CQM_SUCCESS) + goto out_unreg_cqm; + + return RETURN_OK; + +out_unreg_cqm: + cqm_service_unregister(v_hba->hw_dev_handle); + + return UNF_RETURN_ERROR; +} + +static void hifc_release_chip_access(struct hifc_hba_s *v_hba) +{ + HIFC_CHECK(INVALID_VALUE32, v_hba->hw_dev_handle, return); + + hifc_unregister_mgmt_msg_cb(v_hba->hw_dev_handle, HIFC_MOD_FC); + + cqm_service_unregister(v_hba->hw_dev_handle); +} + +static void hifc_get_chip_info(struct hifc_hba_s *v_hba) +{ + unsigned int exi_base = 0; + unsigned int fun_index = 0; + + v_hba->vpid_start = v_hba->fc_service_cap.dev_fc_cap.vp_id_start; + v_hba->vpid_end = v_hba->fc_service_cap.dev_fc_cap.vp_id_end; + fun_index = hifc_global_func_id(v_hba->hw_dev_handle); + exi_base = 0; + + exi_base += (fun_index * HIFC_EXIT_STRIDE); + v_hba->exit_base = HIFC_LSW(exi_base); + v_hba->exit_count = HIFC_EXIT_STRIDE; + v_hba->image_count = UNF_HIFC_MAXRPORT_NUM; + v_hba->max_support_speed = max_speed; + v_hba->port_index = HIFC_LSB(fun_index); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) base information: PortIndex=0x%x, ImgCount=0x%x, ExiBase=0x%x, ExiCount=0x%x, VpIdStart=0x%x, VpIdEnd=0x%x, MaxSpeed=0x%x, Speed=0x%x, Topo=0x%x", + v_hba->port_cfg.port_id, v_hba->port_index, + v_hba->image_count, v_hba->exit_base, + v_hba->exit_count, v_hba->vpid_start, + v_hba->vpid_end, v_hba->max_support_speed, + v_hba->port_speed_cfg, v_hba->port_topo_cfg); +} + +static unsigned int hifc_init_host_res(struct hifc_hba_s *v_hba) +{ + unsigned int ret = RETURN_OK; + struct hifc_hba_s *hba = v_hba; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + + /* Initialize spin lock */ + spin_lock_init(&hba->hba_lock); + spin_lock_init(&hba->flush_state_lock); + spin_lock_init(&hba->delay_info.srq_lock); + /* Initialize init_completion */ + init_completion(&hba->hba_init_complete); + init_completion(&hba->mbox_complete); + + /* Step-1: initialize the communication channel between driver and uP */ + ret = hifc_initial_chip_access(hba); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]HIFC port(0x%x) can't initialize chip access", + hba->port_cfg.port_id); + + goto out_unmap_memory; + } + /* Step-2: get chip configuration information before creating + * queue resources + */ + hifc_get_chip_info(hba); + + /* Step-3: create queue resources */ + ret = hifc_create_queues(hba); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]HIFC port(0x%x) can't create queues", + hba->port_cfg.port_id); + + goto out_release_chip_access; + } + + /* Initialize status parameters */ + hba->active_port_speed = UNF_PORT_SPEED_UNKNOWN; + hba->active_topo = UNF_ACT_TOP_UNKNOWN; + hba->sfp_on = UNF_FALSE; + hba->port_loop_role = UNF_LOOP_ROLE_MASTER_OR_SLAVE; + hba->phy_link = UNF_PORT_LINK_DOWN; + hba->q_set_stage = HIFC_QUEUE_SET_STAGE_INIT; + + /* Initialize parameters referring to the lowlevel */ + hba->remote_rttov_tag = 0; + hba->port_bbscn_cfg = HIFC_LOWLEVEL_DEFAULT_BB_SCN; + + /* Initialize timer, and the unit of E_D_TOV is ms */ + hba->remote_edtov_tag = 0; + hba->remote_bbcredit = 0; + hba->compared_bbscn = 0; + hba->compared_edtov_val = UNF_DEFAULT_EDTOV; + hba->compared_ratov_val = UNF_DEFAULT_RATOV; + hba->removing = UNF_FALSE; + hba->dev_present = UNF_TRUE; + + /* 
Initialize parameters about cos */ + hba->cos_bit_map = cos_bit_map; + memset(hba->cos_rport_cnt, 0, HIFC_MAX_COS_NUM * sizeof(atomic_t)); + + /* Mailbox access completion */ + complete(&hba->mbox_complete); + + /* Notify uP to open timer after creating scq */ + ret = hifc_notify_up_open_timer(hba); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]HIFC port(0x%x) can't open timer", + hba->port_cfg.port_id); + + goto out_destroy_queues; + } + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]HIFC port(0x%x) initialize host resources succeeded", + hba->port_cfg.port_id); + + return ret; + +out_destroy_queues: + hifc_flush_scq_ctx(hba); + hifc_flush_srq_ctx(hba); + hifc_flush_root_ctx(hba); + hifc_destroy_queues(hba); + +out_release_chip_access: + hifc_release_chip_access(hba); + +out_unmap_memory: + return ret; +} + +static void hifc_update_lport_config( + struct hifc_hba_s *v_hba, + struct unf_low_level_function_op_s *v_low_level_fun) +{ +#define HIFC_MULTI_CONF_NONSUPPORT 0 + + struct unf_lport_cfg_item_s *lport_cfg_items = NULL; + + lport_cfg_items = &v_low_level_fun->lport_cfg_items; + + if (v_hba->port_cfg.max_login < v_low_level_fun->support_max_rport) + lport_cfg_items->max_login = v_hba->port_cfg.max_login; + else + lport_cfg_items->max_login = v_low_level_fun->support_max_rport; + + if ((v_hba->port_cfg.sest_num / 2) < UNF_RESERVE_SFS_XCHG) + lport_cfg_items->max_io = v_hba->port_cfg.sest_num; + else + lport_cfg_items->max_io = v_hba->port_cfg.sest_num - + UNF_RESERVE_SFS_XCHG; + + lport_cfg_items->max_sfs_xchg = UNF_MAX_SFS_XCHG; + lport_cfg_items->port_id = v_hba->port_cfg.port_id; + lport_cfg_items->port_mode = v_hba->port_cfg.port_mode; + lport_cfg_items->port_topology = v_hba->port_cfg.port_topology; + lport_cfg_items->max_queue_depth = v_hba->port_cfg.max_queue_depth; + + lport_cfg_items->port_speed = v_hba->port_cfg.port_speed; + lport_cfg_items->tape_support = v_hba->port_cfg.tape_support; + lport_cfg_items->res_mgmt_enabled = UNF_FALSE; + + v_low_level_fun->sys_port_name = + *(unsigned long long *)v_hba->sys_port_name; + v_low_level_fun->sys_node_name = + *(unsigned long long *)v_hba->sys_node_name; + + /* Update chip information */ + v_low_level_fun->dev = v_hba->pci_dev; + v_low_level_fun->chip_info.chip_work_mode = v_hba->work_mode; + v_low_level_fun->chip_info.chip_type = v_hba->chip_type; + v_low_level_fun->chip_info.disable_err_flag = 0; + v_low_level_fun->support_max_speed = v_hba->max_support_speed; + + v_low_level_fun->chip_id = 0; + + v_low_level_fun->sfp_type = UNF_PORT_TYPE_FC_SFP; + + v_low_level_fun->multi_conf_support = HIFC_MULTI_CONF_NONSUPPORT; + v_low_level_fun->support_max_xid_range = v_hba->port_cfg.sest_num; + v_low_level_fun->update_fw_reset_active = + UNF_PORT_UNGRADE_FW_RESET_INACTIVE; + v_low_level_fun->port_type = DRV_PORT_ENTITY_TYPE_PHYSICAL; + + if ((lport_cfg_items->port_id & UNF_FIRST_LPORT_ID_MASK) == + lport_cfg_items->port_id) { + v_low_level_fun->support_upgrade_report = + UNF_PORT_SUPPORT_UPGRADE_REPORT; + } else { + v_low_level_fun->support_upgrade_report = + UNF_PORT_UNSUPPORT_UPGRADE_REPORT; + } + + v_low_level_fun->low_level_type |= UNF_FC_PROTOCOL_TYPE; +} + +static unsigned int hifc_create_lport(struct hifc_hba_s *v_hba) +{ + void *lport = NULL; + struct unf_low_level_function_op_s low_level_fun; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + hifc_fun_op.dev = v_hba->pci_dev; + memcpy(&low_level_fun, &hifc_fun_op, + sizeof(struct 
unf_low_level_function_op_s)); + + /* Update port configuration table */ + hifc_update_lport_config(v_hba, &low_level_fun); + + /* Apply for lport resources */ + UNF_LOWLEVEL_ALLOC_LPORT(lport, v_hba, &low_level_fun); + if (!lport) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) can't allocate Lport", + v_hba->port_cfg.port_id); + + return UNF_RETURN_ERROR; + } + v_hba->lport = lport; + + return RETURN_OK; +} + +void hifc_release_probe_index(unsigned int probe_index) +{ + if (probe_index >= HIFC_MAX_PROBE_PORT_NUM) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Probe index(0x%x) is invalid", probe_index); + + return; + } + + spin_lock(&probe_spin_lock); + if (!test_bit((int)probe_index, (const unsigned long *)probe_bit_map)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Probe index(0x%x) is not probed", + probe_index); + + spin_unlock(&probe_spin_lock); + + return; + } + + clear_bit((int)probe_index, probe_bit_map); + spin_unlock(&probe_spin_lock); +} + +static void hifc_release_host_res(struct hifc_hba_s *v_hba) +{ + hifc_destroy_queues(v_hba); + hifc_release_chip_access(v_hba); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) release low level resource done", + v_hba->port_cfg.port_id); +} + +static struct hifc_hba_s *hifc_init_hba(struct pci_dev *v_dev, + void *v_hwdev_handle, + struct hifc_chip_info_s *v_chip_info, + unsigned char v_card_num) +{ + unsigned int ret = RETURN_OK; + struct hifc_hba_s *hba = NULL; + + /* Allocate HBA */ + hba = kmalloc(sizeof(*hba), GFP_ATOMIC); + HIFC_CHECK(INVALID_VALUE32, hba, return NULL); + memset(hba, 0, sizeof(struct hifc_hba_s)); + + /* Heartbeat default */ + hba->heart_status = 1; + + /* Private data in pciDev */ + hba->pci_dev = v_dev; /* PCI device */ + hba->hw_dev_handle = v_hwdev_handle; + + /* Work mode */ + hba->work_mode = v_chip_info->work_mode; + /* Create work queue */ + hba->work_queue = create_singlethread_workqueue("hifc"); + if (!hba->work_queue) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Hifc creat workqueue failed"); + + goto out_free_hba; + } + /* Init delay work */ + INIT_DELAYED_WORK(&hba->delay_info.del_work, + hifc_rcvd_els_from_srq_time_out); + + /* Notice: Only use FC features */ + (void)hifc_support_fc(v_hwdev_handle, &hba->fc_service_cap); + /* Check parent context available */ + if (hba->fc_service_cap.dev_fc_cap.max_parent_qpc_num == 0) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]FC parent context is not allocated in this function"); + + goto out_destroy_workqueue; + } + max_parent_qpc_num = hba->fc_service_cap.dev_fc_cap.max_parent_qpc_num; + + /* Get port configuration */ + ret = hifc_get_port_cfg(hba, v_chip_info, v_card_num); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Can't get port configuration"); + + goto out_destroy_workqueue; + } + /* Get WWN */ + *(unsigned long long *)hba->sys_node_name = v_chip_info->wwnn; + *(unsigned long long *)hba->sys_port_name = v_chip_info->wwpn; + + /* Initialize host resources */ + ret = hifc_init_host_res(hba); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]HIFC port(0x%x) can't initialize host resource", + hba->port_cfg.port_id); + + goto out_destroy_workqueue; + } + + /* Local Port create */ + ret = hifc_create_lport(hba); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, 
UNF_LOG_REG_ATT, UNF_ERR, + "[err]HIFC port(0x%x) can't create lport", + hba->port_cfg.port_id); + goto out_release_host_res; + } + complete(&hba->hba_init_complete); + + /* Print reference count */ + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "[info]Port(0x%x) probe succeeded.", + hba->port_cfg.port_id); + + return hba; + +out_release_host_res: + hifc_flush_scq_ctx(hba); + hifc_flush_srq_ctx(hba); + hifc_flush_root_ctx(hba); + hifc_release_host_res(hba); + +out_destroy_workqueue: + flush_workqueue(hba->work_queue); + destroy_workqueue(hba->work_queue); + hba->work_queue = NULL; + +out_free_hba: + kfree(hba); + + return NULL; +} + +void hifc_get_total_probed_num(unsigned int *v_probe_cnt) +{ + unsigned int i = 0; + unsigned int count = 0; + + spin_lock(&probe_spin_lock); + for (i = 0; i < HIFC_MAX_PROBE_PORT_NUM; i++) { + if (test_bit((int)i, (const unsigned long *)probe_bit_map)) + count++; + } + + *v_probe_cnt = count; + spin_unlock(&probe_spin_lock); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Probed port total number is 0x%x", count); +} + +static unsigned int hifc_assign_card_num(struct hifc_lld_dev *lld_dev, + struct hifc_chip_info_s *v_chip_info, + unsigned char *v_card_num) +{ + unsigned char i = 0; + unsigned long long card_index = 0; + + card_index = (!pci_is_root_bus(lld_dev->pdev->bus)) ? + lld_dev->pdev->bus->parent->number : + lld_dev->pdev->bus->number; + + spin_lock(&probe_spin_lock); + + for (i = 0; i < HIFC_MAX_CARD_NUM; i++) { + if (test_bit((int)i, (const unsigned long *)card_num_bit_map)) { + if ((card_num_manage[i].card_number == card_index) && + (card_num_manage[i].is_removing == UNF_FALSE)) { + card_num_manage[i].port_count++; + *v_card_num = i; + spin_unlock(&probe_spin_lock); + return RETURN_OK; + } + } + } + + for (i = 0; i < HIFC_MAX_CARD_NUM; i++) { + if (!test_bit((int)i, + (const unsigned long *)card_num_bit_map)) { + card_num_manage[i].card_number = card_index; + card_num_manage[i].port_count = 1; + card_num_manage[i].is_removing = UNF_FALSE; + *v_card_num = i; + set_bit(i, card_num_bit_map); + spin_unlock(&probe_spin_lock); + + return RETURN_OK; + } + } + + spin_unlock(&probe_spin_lock); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Have probe more than 0x%x port, probe failed", i); + + return UNF_RETURN_ERROR; +} + +static void hifc_dec_and_free_card_num(unsigned char v_card_num) +{ + /* 2 ports per card */ + if (v_card_num >= HIFC_MAX_CARD_NUM) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Card number(0x%x) is invalid", v_card_num); + + return; + } + + spin_lock(&probe_spin_lock); + + if (test_bit((int)v_card_num, + (const unsigned long *)card_num_bit_map)) { + card_num_manage[v_card_num].port_count--; + card_num_manage[v_card_num].is_removing = UNF_TRUE; + + if (card_num_manage[v_card_num].port_count == 0) { + card_num_manage[v_card_num].card_number = 0; + card_num_manage[v_card_num].is_removing = UNF_FALSE; + clear_bit((int)v_card_num, card_num_bit_map); + } + } else { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Can not find card number(0x%x)", v_card_num); + } + + spin_unlock(&probe_spin_lock); +} + +unsigned int hifc_assign_probe_index(unsigned int *v_probe_index) +{ + unsigned int i = 0; + + spin_lock(&probe_spin_lock); + for (i = 0; i < HIFC_MAX_PROBE_PORT_NUM; i++) { + if (!test_bit((int)i, (const unsigned long *)probe_bit_map)) { + *v_probe_index = i; + set_bit(i, probe_bit_map); + spin_unlock(&probe_spin_lock); + + 
return RETURN_OK; + } + } + spin_unlock(&probe_spin_lock); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Have probe more than 0x%x port, probe failed", i); + + return UNF_RETURN_ERROR; +} + +int hifc_probe(struct hifc_lld_dev *lld_dev, void **uld_dev, char *uld_dev_name) +{ + struct pci_dev *dev = NULL; + struct hifc_hba_s *hba = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int probe_index = 0; + unsigned int probe_total_num = 0; + unsigned char card_num = INVALID_VALUE8; + struct hifc_chip_info_s chip_info; + + HIFC_CHECK(INVALID_VALUE32, lld_dev, return UNF_RETURN_ERROR_S32); + HIFC_CHECK(INVALID_VALUE32, lld_dev->hwdev, + return UNF_RETURN_ERROR_S32); + HIFC_CHECK(INVALID_VALUE32, lld_dev->pdev, return UNF_RETURN_ERROR_S32); + HIFC_CHECK(INVALID_VALUE32, uld_dev, return UNF_RETURN_ERROR_S32); + HIFC_CHECK(INVALID_VALUE32, uld_dev_name, return UNF_RETURN_ERROR_S32); + + dev = lld_dev->pdev; /* pcie device */ + + memset(&chip_info, 0, sizeof(struct hifc_chip_info_s)); + /* 1. Get & check Total_Probed_number */ + hifc_get_total_probed_num(&probe_total_num); + if (probe_total_num >= HIFC_MAX_PORT_NUM) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Total probe num (0x%x) is larger than allowed number(64)", + probe_total_num); + + return UNF_RETURN_ERROR_S32; + } + /* 2. Check device work mode */ + if (hifc_support_fc(lld_dev->hwdev, NULL)) { + chip_info.work_mode = HIFC_SMARTIO_WORK_MODE_FC; + } else { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port work mode is not FC"); + return UNF_RETURN_ERROR_S32; + } + + /* 4. Assign & Get new Probe index */ + ret = hifc_assign_probe_index(&probe_index); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]AssignProbeIndex fail"); + + return UNF_RETURN_ERROR_S32; + } + + ret = hifc_get_chip_capability((void *)lld_dev->hwdev, &chip_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]GetChipCapability fail"); + return UNF_RETURN_ERROR_S32; + } + + /* Assign & Get new Card number */ + ret = hifc_assign_card_num(lld_dev, &chip_info, &card_num); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]hifc_assign_card_num fail"); + hifc_release_probe_index(probe_index); + + return UNF_RETURN_ERROR_S32; + } + + /* Init HBA resource */ + hba = hifc_init_hba(dev, lld_dev->hwdev, &chip_info, card_num); + if (!hba) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Probe HBA(0x%x) failed.", probe_index); + + hifc_release_probe_index(probe_index); + hifc_dec_and_free_card_num(card_num); + + return UNF_RETURN_ERROR_S32; + } + + /* Name by the order of probe */ + *uld_dev = hba; + + snprintf(uld_dev_name, HIFC_PORT_NAME_STR_LEN, "%s%02x%02x", + HIFC_PORT_NAME_LABEL, + hba->card_info.card_num, hba->card_info.func_num); + memcpy(hba->port_name, uld_dev_name, HIFC_PORT_NAME_STR_LEN); + + hba->probe_index = probe_index; + hifc_hba[probe_index] = hba; + + return RETURN_OK; +} + +static unsigned int hifc_port_check_fw_ready(struct hifc_hba_s *v_hba) +{ +#define HIFC_PORT_CLEAR_DONE 0 +#define HIFC_PORT_CLEAR_DOING 1 + unsigned int clear_state = HIFC_PORT_CLEAR_DOING; + unsigned int ret = RETURN_OK; + unsigned int wait_time_out = 0; + + do { + msleep(1000); + wait_time_out += 1000; + ret = hifc_mbx_get_fw_clear_stat(v_hba, &clear_state); + if (ret != RETURN_OK) + return UNF_RETURN_ERROR; + + /* Total time more than 30s, retry more 
than 3 times, failed */ + if ((wait_time_out > 30000) && + (clear_state != HIFC_PORT_CLEAR_DONE)) + return UNF_RETURN_ERROR; + + } while (clear_state != HIFC_PORT_CLEAR_DONE); + + return RETURN_OK; +} + +static unsigned int hifc_sfp_switch(void *v_hba, void *v_para_in) +{ + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba; + int turn_on = UNF_FALSE; + unsigned int ret = RETURN_OK; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_para_in, return UNF_RETURN_ERROR); + + /* Redundancy check */ + turn_on = *((int *)v_para_in); + if (turn_on == hba->sfp_on) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Port(0x%x) FC physical port is already %s", + hba->port_cfg.port_id, (turn_on) ? "on" : "off"); + + return ret; + } + if (turn_on == UNF_TRUE) { + ret = hifc_port_check_fw_ready(hba); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_WARN, + "[warn]Get port(0x%x) clear state failed, turn on fail", + hba->port_cfg.port_id); + return ret; + } + /* At first, configure port table info if necessary */ + ret = hifc_config_port_table(hba); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[err]Port(0x%x) can't configurate port table", + hba->port_cfg.port_id); + + return ret; + } + } + + /* Switch physical port */ + ret = hifc_port_switch(hba, turn_on); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Port(0x%x) switch failed", + hba->port_cfg.port_id); + return ret; + } + + /* Update HBA's sfp state */ + hba->sfp_on = turn_on; + + return ret; +} + +static unsigned int hifc_destroy_lport(struct hifc_hba_s *v_hba) +{ + unsigned int ret = UNF_RETURN_ERROR; + + UNF_LOWLEVEL_RELEASE_LOCAL_PORT(ret, v_hba->lport); + v_hba->lport = NULL; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) destroy L_Port done", + v_hba->port_cfg.port_id); + + return ret; +} + +unsigned int hifc_port_reset(struct hifc_hba_s *v_hba) +{ + unsigned int ret = RETURN_OK; + unsigned long time_out = 0; + int sfp_before_reset = UNF_FALSE; + int off_para_in = UNF_FALSE; + struct pci_dev *dev = NULL; + struct hifc_hba_s *hba = v_hba; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + dev = hba->pci_dev; + HIFC_CHECK(INVALID_VALUE32, dev, return UNF_RETURN_ERROR); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "[event]Port(0x%x) reset HBA begin", + hba->port_cfg.port_id); + + /* Wait for last init/reset completion */ + time_out = wait_for_completion_timeout( + &hba->hba_init_complete, + (unsigned long)HIFC_PORT_INIT_TIME_SEC_MAX * HZ); + + if (time_out == UNF_ZERO) { + UNF_TRACE(INVALID_VALUE32, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Last HBA initialize/reset timeout: %d second", + HIFC_PORT_INIT_TIME_SEC_MAX); + + return UNF_RETURN_ERROR; + } + + /* Save current port state */ + sfp_before_reset = hba->sfp_on; + + /* Inform the reset event to CM level before beginning */ + UNF_LOWLEVEL_PORT_EVENT(ret, hba->lport, UNF_PORT_RESET_START, NULL); + hba->reset_time = jiffies; + + /* Close SFP */ + ret = hifc_sfp_switch(hba, &off_para_in); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) can't close SFP", + hba->port_cfg.port_id); + hba->sfp_on = sfp_before_reset; + + complete(&hba->hba_init_complete); + + return ret; + } + + ret = hifc_port_check_fw_ready(hba); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, 
UNF_LOG_REG_ATT, UNF_ERR, + "[err]Get port(0x%x) clear state failed, hang port and report chip error", + hba->port_cfg.port_id); + + complete(&hba->hba_init_complete); + return ret; + } + + hifc_queue_pre_process(hba, UNF_FALSE); + + ret = hifc_mbox_reset_chip(hba, HIFC_MBOX_SUBTYPE_LIGHT_RESET); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]HIFC port(0x%x) can't reset chip mailbox", + hba->port_cfg.port_id); + + UNF_LOWLEVEL_PORT_EVENT(ret, hba->lport, + UNF_PORT_GET_FWLOG, NULL); + UNF_LOWLEVEL_PORT_EVENT(ret, hba->lport, + UNF_PORT_DEBUG_DUMP, NULL); + } + + /* Inform the success to CM level */ + UNF_LOWLEVEL_PORT_EVENT(ret, hba->lport, UNF_PORT_RESET_END, NULL); + + /* Queue open */ + hifc_enable_queues_dispatch(hba); + + /* Open SFP */ + (void)hifc_sfp_switch(hba, &sfp_before_reset); + + complete(&hba->hba_init_complete); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[event]Port(0x%x) reset HBA done", + hba->port_cfg.port_id); + + return ret; +#undef HIFC_WAIT_LINKDOWN_EVENT_MS +} + +static unsigned int hifc_delete_scqc_via_cmdq_sync(struct hifc_hba_s *v_hba, + unsigned int scqn) +{ + /* Via CMND Queue */ +#define HIFC_DEL_SCQC_TIMEOUT 3000 + + int ret; + struct hifcoe_cmdqe_delete_scqc_s del_scqc_cmd; + struct hifc_cmd_buf *cmdq_in_buf; + + /* Alloc cmd buffer */ + cmdq_in_buf = hifc_alloc_cmd_buf(v_hba->hw_dev_handle); + if (!cmdq_in_buf) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]cmdq in_cmd_buf alloc failed"); + HIFC_ERR_IO_STAT(v_hba, HIFCOE_TASK_T_DEL_SCQC); + return UNF_RETURN_ERROR; + } + + /* Build & Send Cmnd */ + memset(&del_scqc_cmd, 0, sizeof(del_scqc_cmd)); + del_scqc_cmd.wd0.task_type = HIFCOE_TASK_T_DEL_SCQC; + del_scqc_cmd.wd1.scqn = HIFC_LSW(scqn); + hifc_cpu_to_big32(&del_scqc_cmd, sizeof(del_scqc_cmd)); + memcpy(cmdq_in_buf->buf, &del_scqc_cmd, sizeof(del_scqc_cmd)); + cmdq_in_buf->size = sizeof(del_scqc_cmd); + + ret = hifc_cmdq_detail_resp(v_hba->hw_dev_handle, HIFC_ACK_TYPE_CMDQ, + HIFC_MOD_FCOE, 0, + cmdq_in_buf, NULL, HIFC_DEL_SCQC_TIMEOUT); + + /* Free cmnd buffer */ + hifc_free_cmd_buf(v_hba->hw_dev_handle, cmdq_in_buf); + + if (ret) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Send del scqc via cmdq failed, ret=0x%x", ret); + + HIFC_ERR_IO_STAT(v_hba, HIFCOE_TASK_T_DEL_SCQC); + return UNF_RETURN_ERROR; + } + + HIFC_IO_STAT(v_hba, HIFCOE_TASK_T_DEL_SCQC); + + return RETURN_OK; +} + +void hifc_flush_scq_ctx(struct hifc_hba_s *v_hba) +{ + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Start destroy total 0x%x SCQC", HIFC_TOTAL_SCQ_NUM); + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return); + + (void)hifc_delete_scqc_via_cmdq_sync(v_hba, 0); +} + +void hifc_set_hba_flush_state(struct hifc_hba_s *v_hba, int in_flush) +{ + unsigned long flag = 0; + + spin_lock_irqsave(&v_hba->flush_state_lock, flag); + v_hba->in_flushing = in_flush; + spin_unlock_irqrestore(&v_hba->flush_state_lock, flag); +} + +static int hifc_hba_is_present(struct hifc_hba_s *v_hba) +{ + int ret = RETURN_OK; + int present = UNF_FALSE; + unsigned int vendor_id = 0; + + ret = pci_read_config_dword(v_hba->pci_dev, 0, &vendor_id); + vendor_id &= HIFC_PCI_VENDOR_ID_MASK; + if ((ret == RETURN_OK) && (vendor_id == HIFC_PCI_VENDOR_ID)) { + present = UNF_TRUE; + } else { + present = UNF_FALSE; + v_hba->dev_present = UNF_FALSE; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "[info]Port %s remove: vender_id=0x%x, 
ret=0x%x", + present ? "normal" : "surprise", vendor_id, ret); + + return present; +} + +static void hifc_exit(struct pci_dev *v_dev, struct hifc_hba_s *v_hba) +{ + unsigned int ret = UNF_RETURN_ERROR; + int sfp_switch = UNF_FALSE; + int present = UNF_TRUE; + + v_hba->removing = UNF_TRUE; + + /* 1. Check HBA present or not */ + present = hifc_hba_is_present(v_hba); + if (present == UNF_TRUE) { + if (v_hba->phy_link == UNF_PORT_LINK_DOWN) + v_hba->q_set_stage = HIFC_QUEUE_SET_STAGE_FLUSHDONE; + + /* At first, close sfp */ + sfp_switch = UNF_FALSE; + (void)hifc_sfp_switch((void *)v_hba, (void *)&sfp_switch); + } + + /* 2. Report COM with HBA removing: delete route timer delay work */ + UNF_LOWLEVEL_PORT_EVENT(ret, v_hba->lport, UNF_PORT_BEGIN_REMOVE, NULL); + + /* 3. Report COM with HBA Nop, COM release I/O(s) & R_Port(s) forcely */ + UNF_LOWLEVEL_PORT_EVENT(ret, v_hba->lport, UNF_PORT_NOP, NULL); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]PCI device(%p) remove port(0x%x) failed", + v_dev, v_hba->port_index); + } + + if (present == UNF_TRUE) { + /* 4.1 Wait for all SQ empty, free SRQ buffer & SRQC */ + hifc_queue_pre_process(v_hba, UNF_TRUE); + } + + /* 5. Destroy L_Port */ + (void)hifc_destroy_lport(v_hba); + + /* 6. With HBA is present */ + if (present == UNF_TRUE) { + /* Enable Queues dispatch */ + hifc_enable_queues_dispatch(v_hba); + /* Need reset port if necessary */ + (void)hifc_mbox_reset_chip(v_hba, + HIFC_MBOX_SUBTYPE_HEAVY_RESET); + + /* Flush SCQ context */ + hifc_flush_scq_ctx(v_hba); + + /* Flush SRQ context */ + hifc_flush_srq_ctx(v_hba); + + /* Flush Root context in order to prevent DMA */ + hifc_flush_root_ctx(v_hba); + + /* + * NOTE: while flushing txrx, hash bucket will be cached out in + * UP. Wait to clear resources completely + */ + msleep(1000); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) flush scq & srq & root context done", + v_hba->port_cfg.port_id); + } + + /* 7. Notify uP to close timer before delete SCQ */ + ret = hifc_notify_up_close_timer(v_hba); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]HIFC port(0x%x) can't close timer", + v_hba->port_cfg.port_id); + } + + /* 8. Release host resources */ + hifc_release_host_res(v_hba); + + /* 9. Destroy FC work queue */ + if (v_hba->work_queue) { + flush_workqueue(v_hba->work_queue); + destroy_workqueue(v_hba->work_queue); + v_hba->work_queue = NULL; + } + + /* 10. Release Probe index & Decrease card number */ + hifc_release_probe_index(v_hba->probe_index); + hifc_dec_and_free_card_num((unsigned char)v_hba->card_info.card_num); + + /* 11. 
Free HBA memory */ + kfree(v_hba); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[event]PCI device(%p) remove succeed", v_dev); +} + +void hifc_remove(struct hifc_lld_dev *lld_dev, void *uld_dev) +{ + struct pci_dev *dev = NULL; + struct hifc_hba_s *hba = (struct hifc_hba_s *)uld_dev; + unsigned int probe_total_num = 0; + unsigned int probe_index = 0; + + HIFC_CHECK(INVALID_VALUE32, NULL != lld_dev, return); + HIFC_CHECK(INVALID_VALUE32, NULL != uld_dev, return); + HIFC_CHECK(INVALID_VALUE32, NULL != lld_dev->hwdev, return); + HIFC_CHECK(INVALID_VALUE32, NULL != lld_dev->pdev, return); + + dev = hba->pci_dev; + + /* Get total probed port number */ + hifc_get_total_probed_num(&probe_total_num); + if (probe_total_num < 1) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Port manager is empty and no need to remove"); + return; + } + + /* check pci vendor id */ + if (dev->vendor != HIFC_PCI_VENDOR_ID_HUAWEI) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Wrong vendor id(0x%x) and exit", dev->vendor); + return; + } + + /* Check function ability */ + + if (!(hifc_support_fc(lld_dev->hwdev, NULL))) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]FC is not enable in this function"); + return; + } + + /* Get probe index */ + probe_index = hba->probe_index; + + /* Parent context allocation check */ + if (hba->fc_service_cap.dev_fc_cap.max_parent_qpc_num == 0) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]FC parent context not allocate in this function"); + return; + } + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]HBA(0x%x) start removing...", hba->port_index); + + /* HBA removinig... */ + hifc_exit(dev, hba); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "[event]Port(0x%x) pci device removed, vendorid(0x%04x) devid(0x%04x)", + probe_index, dev->vendor, dev->device); + + /* Probe index check */ + if (probe_index < HIFC_HBA_PORT_MAX_NUM) { + hifc_hba[probe_index] = NULL; + } else { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Probe index(0x%x) is invalid and remove failed", + probe_index); + } + + hifc_get_total_probed_num(&probe_total_num); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[event]Removed index=%u, RemainNum=%u", + probe_index, probe_total_num); +} + +void hifc_event(struct hifc_lld_dev *lld_dev, void *uld_dev, + struct hifc_event_info *event) +{ + struct hifc_hba_s *hba = uld_dev; + + HIFC_CHECK(INVALID_VALUE32, NULL != lld_dev, return); + HIFC_CHECK(INVALID_VALUE32, NULL != lld_dev->hwdev, return); + HIFC_CHECK(INVALID_VALUE32, NULL != lld_dev->pdev, return); + HIFC_CHECK(INVALID_VALUE32, NULL != hba, return); + HIFC_CHECK(INVALID_VALUE32, NULL != event, return); + + switch (event->type) { + case HIFC_EVENT_HEART_LOST: + hba->heart_status = 0; + HIFC_COM_UP_ERR_EVENT_STAT(hba, HIFC_EVENT_HEART_LOST); + break; + default: + break; + } +} + +static unsigned int hifc_get_hba_pcie_link_state(void *v_hba, + void *v_link_state) +{ + int *link_state = v_link_state; + int present = UNF_TRUE; + struct hifc_hba_s *hba = v_hba; + int ret; + int last_dev_state = UNF_TRUE; + int cur_dev_state = UNF_TRUE; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_link_state, return UNF_RETURN_ERROR); + last_dev_state = hba->dev_present; + ret = hifc_get_card_present_state(hba->hw_dev_handle, (bool *)&present); + if (ret || present != 
UNF_TRUE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "[event]port(0x%x) is not present,ret:%d, present:%d", + hba->port_cfg.port_id, ret, present); + cur_dev_state = UNF_FALSE; + } else { + cur_dev_state = UNF_TRUE; + } + + hba->dev_present = cur_dev_state; + + /* the heartbeat is considered lost only when the PCIE link is down for + * two times. + */ + if ((last_dev_state == UNF_FALSE) && (cur_dev_state == UNF_FALSE)) + hba->heart_status = UNF_FALSE; + *link_state = hba->dev_present; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_ABNORMAL, UNF_INFO, + "Port:0x%x,get dev present:%d", hba->port_cfg.port_id, + *link_state); + return RETURN_OK; +} diff --git a/drivers/scsi/huawei/hifc/hifc_hba.h b/drivers/scsi/huawei/hifc/hifc_hba.h new file mode 100644 index 000000000000..0ffa7c3402b0 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_hba.h @@ -0,0 +1,234 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef __HIFC_HBA_H__ +#define __HIFC_HBA_H__ + +#include "unf_common.h" +#include "hifc_queue.h" +#include "hifc_api_cmd.h" +#include "hifc_mgmt.h" + +#define HIFC_PCI_VENDOR_ID_MASK (0xffff) + +#define HIFC_LOWLEVEL_DEFAULT_LOOP_BB_CREDIT 8 +#define HIFC_LOWLEVEL_DEFAULT_32G_BB_CREDIT 255 +#define HIFC_LOWLEVEL_DEFAULT_16G_BB_CREDIT 255 +#define HIFC_LOWLEVEL_DEFAULT_842G_BB_CREDIT 255 +#define HIFC_LOWLEVEL_DEFAULT_BB_SCN 0 + +#define HIFC_LOWLEVEL_DEFAULT_32G_ESCH_VALUE 28081 +#define HIFC_LOWLEVEL_DEFAULT_16G_ESCH_VALUE 14100 +#define HIFC_LOWLEVEL_DEFAULT_842G_ESCH_VALUE 7000 +#define HIFC_LOWLEVEL_DEFAULT_ESCH_BUS_SIZE 0x2000 + +#define HIFC_SMARTIO_WORK_MODE_FC 0x1 +#define UNF_FUN_ID_MASK 0x07 +#define UNF_HIFC_FC 0x01 +#define UNF_HIFC_MAXNPIV_NUM 64 +#define HIFC_MAX_COS_NUM 8 +#define HIFC_PCI_VENDOR_ID_HUAWEI 0x19e5 +#define HIFC_SCQ_CNTX_SIZE 32 +#define HIFC_SRQ_CNTX_SIZE 64 +#define HIFC_PORT_INIT_TIME_SEC_MAX 1 + +#define HIFC_PORT_NAME_LABEL "hifc" +#define HIFC_PORT_NAME_STR_LEN 16 +#define HIFC_MAX_PROBE_PORT_NUM 64 +#define HIFC_PORT_NUM_PER_TABLE 64 +#define HIFC_MAX_CARD_NUM 32 +#define HIFC_HBA_PORT_MAX_NUM HIFC_MAX_PROBE_PORT_NUM +/* Heart Lost Flag */ +#define HIFC_EVENT_HEART_LOST 0 + +#define HIFC_GET_HBA_PORT_ID(__hba) ((__hba)->port_index) +#define HIFC_HBA_NOT_PRESENT(__hba) ((__hba)->dev_present == UNF_FALSE) + +struct hifc_port_cfg_s { + unsigned int port_id; /* Port ID */ + unsigned int port_mode; /* Port mode:INI(0x20) TGT(0x10) BOTH(0x30) */ + unsigned int port_topology; /* Port topo:0x3:loop,0xc:p2p,0xf:auto */ + unsigned int port_alpa; /* Port ALPA */ + unsigned int max_queue_depth;/* Max Queue depth Registration to SCSI */ + unsigned int sest_num; /* IO burst num:512-4096 */ + unsigned int max_login; /* Max Login Session. 
*/ + unsigned int node_name_hi; /* nodename high 32 bits */ + unsigned int node_name_lo; /* nodename low 32 bits */ + unsigned int port_name_hi; /* portname high 32 bits */ + unsigned int port_name_lo; /* portname low 32 bits */ + /* Port speed 0:auto 4:4Gbps 8:8Gbps 16:16Gbps */ + unsigned int port_speed; + unsigned int interrupt_delay; /* Delay times(ms) in interrupt */ + unsigned int tape_support; /* tape support */ +}; + +#define HIFC_VER_INFO_SIZE 128 +struct hifc_drv_version_s { + char ver[HIFC_VER_INFO_SIZE]; +}; + +struct hifc_card_info_s { + unsigned int card_num : 8; + unsigned int func_num : 8; + unsigned int base_func : 8; + /* + * Card type:UNF_FC_SERVER_BOARD_32_G(6) 32G mode, + * UNF_FC_SERVER_BOARD_16_G(7)16G mode + */ + unsigned int card_type : 8; +}; + +struct hifc_card_num_manage_s { + int is_removing; + unsigned int port_count; + unsigned long long card_number; +}; + +struct hifc_led_state_s { + unsigned char green_speed_led; + unsigned char yellow_speed_led; + unsigned char ac_led; + unsigned char reserved; +}; + +enum hifc_queue_set_stage_e { + HIFC_QUEUE_SET_STAGE_INIT = 0, + HIFC_QUEUE_SET_STAGE_SCANNING, + HIFC_QUEUE_SET_STAGE_FLUSHING, + HIFC_QUEUE_SET_STAGE_FLUSHDONE, + HIFC_QUEUE_SET_STAGE_BUTT +}; + +struct hifc_srq_delay_info_s { + unsigned char srq_delay_flag; /* Check whether need to delay */ + unsigned char root_rq_rcvd_flag; + unsigned short rsd; + spinlock_t srq_lock; + struct unf_frame_pkg_s pkg; + struct delayed_work del_work; +}; + +struct hifc_fw_ver_detail_s { + unsigned char ucode_ver[HIFC_VER_LEN]; + unsigned char ucode_compile_time[HIFC_COMPILE_TIME_LEN]; + unsigned char up_ver[HIFC_VER_LEN]; + unsigned char up_compile_time[HIFC_COMPILE_TIME_LEN]; + unsigned char boot_ver[HIFC_VER_LEN]; + unsigned char boot_compile_time[HIFC_COMPILE_TIME_LEN]; +}; + +/* get wwpn and wwnn */ +struct hifc_chip_info_s { + unsigned char work_mode; + unsigned char tape_support; + unsigned long long wwpn; + unsigned long long wwnn; +}; + +struct hifc_hba_s { + struct pci_dev *pci_dev; + void *hw_dev_handle; + struct fc_service_cap fc_service_cap; + struct hifc_scq_info_s scq_info[HIFC_TOTAL_SCQ_NUM]; + struct hifc_srq_info_s els_srq_info; + /* PCI IO Memory */ + void __iomem *bar0; + unsigned int bar0_len; + + struct hifc_root_info_s root_info; + struct hifc_parent_queue_mgr_s *parent_queue_mgr; + + /* Link list Sq WqePage Pool */ + struct hifc_sq_wqe_page_pool_s sq_wpg_pool; + + enum hifc_queue_set_stage_e q_set_stage; + unsigned int next_clearing_sq; + unsigned int default_sq_id; + /* Port parameters, Obtained through firmware */ + unsigned short q_s_max_count; + unsigned char port_type; /* FC Port */ + unsigned char port_index; /* Phy Port */ + unsigned int default_scqn; + + unsigned char chip_type; /* chiptype:Smart or fc */ + unsigned char work_mode; + struct hifc_card_info_s card_info; + char port_name[HIFC_PORT_NAME_STR_LEN]; + unsigned int probe_index; + + unsigned short exit_base; + unsigned short exit_count; + unsigned short image_count; + unsigned char vpid_start; + unsigned char vpid_end; + + spinlock_t flush_state_lock; + int in_flushing; + + struct hifc_port_cfg_s port_cfg; /* Obtained through Config */ + + void *lport; /* Used in UNF level */ + + unsigned char sys_node_name[UNF_WWN_LEN]; + unsigned char sys_port_name[UNF_WWN_LEN]; + + struct completion hba_init_complete; + struct completion mbox_complete; + + unsigned short removing; + int sfp_on; + int dev_present; + int heart_status; + spinlock_t hba_lock; + unsigned int port_topo_cfg; + unsigned int 
port_bbscn_cfg; + unsigned int port_loop_role; + unsigned int port_speed_cfg; + unsigned int max_support_speed; + + unsigned char remote_rttov_tag; + unsigned char remote_edtov_tag; + unsigned short compared_bbscn; + unsigned short remote_bbcredit; + unsigned int compared_edtov_val; + unsigned int compared_ratov_val; + enum unf_act_topo_e active_topo; + unsigned int active_port_speed; + unsigned int active_rx_bb_credit; + unsigned int active_bb_scn; + unsigned int phy_link; + unsigned int fcp_conf_cfg; + /* loop */ + unsigned char active_al_pa; + unsigned char loop_map_valid; + unsigned char loop_map[UNF_LOOPMAP_COUNT]; + + unsigned int cos_bit_map; + atomic_t cos_rport_cnt[HIFC_MAX_COS_NUM]; + struct hifc_led_state_s led_states; + unsigned int fec_status; + struct workqueue_struct *work_queue; + unsigned long long reset_time; + struct hifc_srq_delay_info_s delay_info; +}; + +enum drv_port_entity_type_e { + DRV_PORT_ENTITY_TYPE_PHYSICAL = 0, + DRV_PORT_ENTITY_TYPE_VIRTUAL = 1, + DRV_PORT_ENTITY_TYPE_BUTT +}; + +extern struct hifc_hba_s *hifc_hba[HIFC_HBA_PORT_MAX_NUM]; +extern spinlock_t probe_spin_lock; +extern unsigned long probe_bit_map[HIFC_MAX_PROBE_PORT_NUM / + HIFC_PORT_NUM_PER_TABLE]; + +unsigned int hifc_port_reset(struct hifc_hba_s *v_hba); +void hifc_flush_scq_ctx(struct hifc_hba_s *v_hba); +void hifc_set_hba_flush_state(struct hifc_hba_s *v_hba, int in_flush); +void hifc_get_total_probed_num(unsigned int *v_probe_cnt); + +#endif diff --git a/drivers/scsi/huawei/hifc/hifc_queue.c b/drivers/scsi/huawei/hifc/hifc_queue.c new file mode 100644 index 000000000000..2c932d26bf90 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_queue.c @@ -0,0 +1,7020 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#include "unf_log.h" +#include "unf_common.h" +#include "hifc_queue.h" +#include "hifc_module.h" +#include "hifc_wqe.h" +#include "hifc_service.h" +#include "hifc_chipitf.h" +#include "hifc_cqm_object.h" +#include "hifc_cqm_main.h" + +#define HIFC_UCODE_CMD_MODIFY_QUEUE_CONTEXT 0 + +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) +#define HIFC_DONE_MASK (0x00000001) +#else +#define HIFC_DONE_MASK (0x01000000) +#endif +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) +#define HIFC_OWNER_MASK (0x80000000) +#else +#define HIFC_OWNER_MASK (0x00000080) +#endif +#define HIFC_SQ_LINK_PRE (1 << 2) + +#define HIFC_SQ_HEADER_ADDR_ALIGN_SIZE (64) +#define HIFC_SQ_HEADER_ADDR_ALIGN_SIZE_MASK (HIFC_SQ_HEADER_ADDR_ALIGN_SIZE - 1) + +#define HIFC_ADDR_64_ALIGN(addr)\ + (((addr) + (HIFC_SQ_HEADER_ADDR_ALIGN_SIZE_MASK)) &\ + ~(HIFC_SQ_HEADER_ADDR_ALIGN_SIZE_MASK)) + +static unsigned int hifc_get_parity_value(unsigned long long *v_src_data, + unsigned int v_row, + unsigned int v_column) +{ + unsigned int i = 0; + unsigned int j = 0; + unsigned int offset = 0; + unsigned int group = 0; + unsigned int bit_offset = 0; + unsigned int bit_val = 0; + unsigned int tmp_val = 0; + unsigned int dest_data = 0; + + for (i = 0; i < v_row; i++) { + for (j = 0; j < v_column; j++) { + offset = (v_row * j + i); + group = offset / (sizeof(v_src_data[0]) * 8); + bit_offset = offset % (sizeof(v_src_data[0]) * 8); + tmp_val = (v_src_data[group] >> bit_offset) & 0x1; + + if (j == 0) { + bit_val = tmp_val; + continue; + } + + bit_val ^= tmp_val; + } + + bit_val = (~bit_val) & 0x1; + + dest_data |= (bit_val << i); + } + + return dest_data; +} + +/** + * hifc_update_producer_info - update producer pi and obit value + * @q_depth: queue max depth + * @v_pi: pi 
value after the queue is updated + * @v_owner: owner value after the queue is updated + */ +static void hifc_update_producer_info(unsigned short q_depth, + unsigned short *v_pi, + unsigned short *v_owner) +{ + unsigned short cur_pi = 0; + unsigned short next_pi = 0; + unsigned short owner = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_pi, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_owner, return); + + cur_pi = *v_pi; + next_pi = cur_pi + 1; + + if (next_pi < q_depth) { + *v_pi = next_pi; + } else { + /* PI reversal */ + *v_pi = 0; + + /* obit reversal */ + owner = *v_owner; + *v_owner = !owner; + } +} + +/** + * hifc_update_consumer_info - update consumer ci and obit value + * @q_depth: queue max depth + * @v_ci: ci value after the queue is updated + * @v_owner: owner value after the queue is updated + */ +static void hifc_update_consumer_info(unsigned short q_depth, + unsigned short *v_ci, + unsigned short *v_owner) +{ + unsigned short cur_ci = 0; + unsigned short next_ci = 0; + unsigned short owner = 0; + + cur_ci = *v_ci; + next_ci = cur_ci + 1; + + if (next_ci < q_depth) { + *v_ci = next_ci; + } else { + /* CI reversal */ + *v_ci = 0; + + /* obit reversal */ + owner = *v_owner; + *v_owner = !owner; + } +} + +static inline void hifc_update_cq_header(struct hifc_ci_record_s *v_ci_record, + unsigned short ci, + unsigned short owner) +{ + unsigned int size = 0; + struct hifc_ci_record_s ci_record = { 0 }; + + size = sizeof(struct hifc_ci_record_s); + memcpy(&ci_record, v_ci_record, size); + hifc_big_to_cpu64(&ci_record, size); + + ci_record.cmsn = ci + + (unsigned short)(owner << HIFC_CQ_HEADER_OWNER_SHIFT); + ci_record.dump_cmsn = ci_record.cmsn; + hifc_cpu_to_big64(&ci_record, size); + + wmb(); + memcpy(v_ci_record, &ci_record, size); +} + +static void hifc_update_srq_header(struct hifc_db_record *v_pmsn_record, + unsigned short pmsn) +{ + unsigned int size = 0; + struct hifc_db_record pmsn_record = { 0 }; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_pmsn_record, return); + + size = sizeof(struct hifc_db_record); + memcpy(&pmsn_record, v_pmsn_record, size); + hifc_big_to_cpu64(&pmsn_record, size); + + pmsn_record.pmsn = pmsn; + pmsn_record.dump_pmsn = pmsn_record.pmsn; + hifc_cpu_to_big64(&pmsn_record, sizeof(struct hifc_db_record)); + + wmb(); + memcpy(v_pmsn_record, &pmsn_record, size); +} + +static unsigned int hifc_alloc_root_sq_info( + struct hifc_root_info_s *v_root_info) +{ + unsigned int sq_info_size = 0; + struct hifc_root_sq_info_s *root_sq_info = NULL; + + sq_info_size = (unsigned int) + (sizeof(struct hifc_root_sq_info_s) * v_root_info->sq_num); + root_sq_info = (struct hifc_root_sq_info_s *)kmalloc(sq_info_size, + GFP_ATOMIC); + if (!root_sq_info) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Allocate Root SQ(s) failed"); + + return UNF_RETURN_ERROR; + } + + memset(root_sq_info, 0, sq_info_size); + v_root_info->sq_info = root_sq_info; + + return RETURN_OK; +} + +static void hifc_free_root_sq_info(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index; + struct hifc_root_sq_info_s *sq_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_root_info, return); + + for (q_index = 0; q_index < v_root_info->sq_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(v_root_info->sq_info) + + q_index; + UNF_REFERNCE_VAR(sq_info); + } + kfree(v_root_info->sq_info); + v_root_info->sq_info = NULL; +} + +static void hifc_init_root_sq_base_info(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + unsigned short 
global_base_qpn = 0; + unsigned short max_sq_num = 0; + struct hifc_root_sq_info_s *sq_info = NULL; + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_root_info->phba; + global_base_qpn = hifc_get_global_base_qpn(hba->hw_dev_handle); + max_sq_num = hifc_func_max_qnum(hba->hw_dev_handle); + + for (q_index = 0; q_index < v_root_info->sq_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(v_root_info->sq_info) + + q_index; + sq_info->qid = (unsigned short)q_index; + sq_info->max_qnum = max_sq_num; + spin_lock_init(&sq_info->root_sq_spin_lock); + sq_info->q_depth = HIFC_ROOT_SQ_DEPTH; + sq_info->wqe_bb_size = HIFC_ROOT_SQ_WQEBB; + sq_info->root_info = v_root_info; + sq_info->global_qpn = global_base_qpn + q_index; + sq_info->owner = HIFC_ROOT_SQ_LOOP_OWNER; + sq_info->in_flush = UNF_FALSE; + } +} + +static unsigned int hifc_alloc_root_sq_ci_addr( + struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + unsigned int ci_addr_size = 0; + unsigned int ci_addr_offset = 0; + struct hifc_hba_s *hba = NULL; + struct hifc_root_sq_info_s *sq_info = NULL; + + /* Alignment with 4 Bytes */ + ci_addr_size = HIFC_ROOT_SQ_CI_TABLE_STEP_BYTE * v_root_info->sq_num; + hba = (struct hifc_hba_s *)v_root_info->phba; + + v_root_info->virt_sq_ci_table_buff = dma_alloc_coherent( + &hba->pci_dev->dev, + ci_addr_size, + &v_root_info->sq_ci_table_dma, + GFP_KERNEL); + if (!v_root_info->virt_sq_ci_table_buff) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Allocate Root SQ CI table failed"); + + return UNF_RETURN_ERROR; + } + memset(v_root_info->virt_sq_ci_table_buff, 0, ci_addr_size); + v_root_info->sq_ci_table_size = ci_addr_size; + + for (q_index = 0; q_index < v_root_info->sq_num; q_index++) { + ci_addr_offset = q_index * HIFC_ROOT_SQ_CI_TABLE_STEP_BYTE; + sq_info = (struct hifc_root_sq_info_s *)(v_root_info->sq_info) + + q_index; + sq_info->ci_addr = (unsigned short *) + ((void *) + (((unsigned char *)v_root_info->virt_sq_ci_table_buff) + + ci_addr_offset)); + sq_info->ci_dma_addr = v_root_info->sq_ci_table_dma + + ci_addr_offset; + } + + return RETURN_OK; +} + +static void hifc_free_root_sq_ci_addr(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_hba_s *hba = NULL; + struct hifc_root_sq_info_s *sq_info = NULL; + + root_info = v_root_info; + hba = (struct hifc_hba_s *)root_info->phba; + dma_free_coherent(&hba->pci_dev->dev, root_info->sq_ci_table_size, + root_info->virt_sq_ci_table_buff, + root_info->sq_ci_table_dma); + root_info->virt_sq_ci_table_buff = NULL; + root_info->sq_ci_table_dma = 0; + + for (q_index = 0; q_index < root_info->sq_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + sq_info->ci_addr = NULL; + sq_info->ci_dma_addr = 0; + } +} + +static unsigned int hifc_alloc_root_sq_buff( + struct hifc_root_info_s *v_root_info) +{ + int ret = 0; + unsigned int q_index = 0; + unsigned int back_q_num = 0; + struct hifc_root_sq_info_s *sq_info = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_hba_s *hba = NULL; + + root_info = v_root_info; + hba = (struct hifc_hba_s *)(root_info->phba); + + for (q_index = 0; q_index < root_info->sq_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + + /* Wqe_Base_Size:64; Depth:2048; Page_Size:4096 */ + ret = hifc_slq_alloc(hba->hw_dev_handle, sq_info->wqe_bb_size, + sq_info->q_depth, (u16)PAGE_SIZE, + (u64 *)&sq_info->cla_addr, + 
&sq_info->sq_handle); + if ((ret != 0) || (!sq_info->sq_handle) || + (sq_info->cla_addr == 0)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[err]Port(0x%x) slq_allocate Root SQ WQE buffer failed, SQ index = %u, return %u", + hba->port_cfg.port_id, q_index, ret); + + goto free_sq_wqe_buff; + } + } + + return RETURN_OK; + +free_sq_wqe_buff: + back_q_num = q_index; + + for (q_index = 0; q_index < back_q_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + hifc_slq_free(hba->hw_dev_handle, sq_info->sq_handle); + sq_info->sq_handle = NULL; + sq_info->cla_addr = 0; + } + + return UNF_RETURN_ERROR; +} + +static void hifc_free_root_sq_buff(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + struct hifc_root_sq_info_s *sq_info = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_root_info, return); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)(root_info->phba); + + for (q_index = 0; q_index < root_info->sq_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + hifc_slq_free(hba->hw_dev_handle, sq_info->sq_handle); + sq_info->sq_handle = NULL; + sq_info->cla_addr = 0; + } +} + +irqreturn_t hifc_root_sq_irq(int v_irq, void *v_sq_info) +{ + struct hifc_root_sq_info_s *sq_info = NULL; + unsigned short cur_ci = 0; + static unsigned int enter_num; + + enter_num++; + sq_info = (struct hifc_root_sq_info_s *)v_sq_info; + + cur_ci = *sq_info->ci_addr; + cur_ci = be16_to_cpu(cur_ci); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[event]Root SQ Irq Enter Num is %u, Root SQ Ci is %u", + enter_num, cur_ci); + HIFC_REFERNCE_VAR(enter_num, INVALID_VALUE32, IRQ_HANDLED) + HIFC_REFERNCE_VAR(cur_ci, INVALID_VALUE16, IRQ_HANDLED) + + return IRQ_HANDLED; +} + +/* + * hifc_alloc_root_sq_int - Allocate interrupt resources in Root SQ, and + * register callback function. 
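+ * One MSI-X vector is requested for each Root SQ; the vector is enabled + * with hifc_set_msix_state() right after request_irq().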
+ * @v_root_info: root sq struct info + * @Return: 0 - success, negative - failure + */ +static unsigned int hifc_alloc_root_sq_int(struct hifc_root_info_s *v_root_info) +{ + int ret = UNF_RETURN_ERROR_S32; + unsigned int q_index = 0; + unsigned int cfg_num = 0; + unsigned short act_num = 0; + struct irq_info irq_info; + struct hifc_root_sq_info_s *sq_info = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_hba_s *hba = NULL; + + root_info = v_root_info; + hba = (struct hifc_hba_s *)(root_info->phba); + + for (q_index = 0; q_index < root_info->sq_num; q_index++) { + ret = hifc_alloc_irqs(hba->hw_dev_handle, SERVICE_T_FC, + HIFC_INT_NUM_PER_QUEUE, &irq_info, + &act_num); + if ((ret != RETURN_OK) || + (act_num != HIFC_INT_NUM_PER_QUEUE)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[err]cfg_alloc_irqs Root SQ irq failed, SQ Index = 0x%x, return 0x%x", + q_index, ret); + + goto free_irq; + } + + if (irq_info.msix_entry_idx >= HIFC_ROOT_Q_INT_ID_MAX) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[err]cfg_alloc_irqs Root SQ irq id exceed 1024, msix_entry_idx 0x%x", + irq_info.msix_entry_idx); + + hifc_free_irq(hba->hw_dev_handle, SERVICE_T_FC, + irq_info.irq_id); + goto free_irq; + } + + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + sq_info->irq_id = (unsigned int)(irq_info.irq_id); + sq_info->msix_entry_idx = (unsigned short) + (irq_info.msix_entry_idx); + + ret = snprintf(sq_info->irq_name, HIFC_IRQ_NAME_MAX - 1, + "Root SQ 0x%x", q_index); + UNF_FUNCTION_RETURN_CHECK(ret, HIFC_IRQ_NAME_MAX - 1); + ret = request_irq(sq_info->irq_id, hifc_root_sq_irq, 0UL, + sq_info->irq_name, sq_info); + hifc_set_msix_state(hba->hw_dev_handle, sq_info->msix_entry_idx, + HIFC_MSIX_ENABLE); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[err]UNF_OS_REQUEST_IRQ Root SQ irq failed, SQ Index = 0x%x, return 0x%x", + q_index, ret); + + hifc_free_irq(hba->hw_dev_handle, SERVICE_T_FC, + sq_info->irq_id); + sq_info->irq_id = 0; + sq_info->msix_entry_idx = 0; + goto free_irq; + } + } + + return RETURN_OK; + +free_irq: + cfg_num = q_index; + + for (q_index = 0; q_index < cfg_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + + free_irq(sq_info->irq_id, sq_info); + hifc_free_irq(hba->hw_dev_handle, SERVICE_T_FC, + sq_info->irq_id); + sq_info->irq_id = 0; + sq_info->msix_entry_idx = 0; + } + + return UNF_RETURN_ERROR; +} + +static void hifc_free_root_sq_int(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + struct hifc_root_sq_info_s *sq_info = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_root_info, return); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)(root_info->phba); + + for (q_index = 0; q_index < root_info->sq_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + hifc_set_msix_state(hba->hw_dev_handle, sq_info->msix_entry_idx, + HIFC_MSIX_DISABLE); + free_irq(sq_info->irq_id, sq_info); + hifc_free_irq(hba->hw_dev_handle, SERVICE_T_FC, + sq_info->irq_id); + sq_info->irq_id = 0; + sq_info->msix_entry_idx = 0; + } +} + +/* + * hifc_cfg_root_sq_ci_tbl - Configure CI address in SQ and interrupt number. 
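+ * The CI DMA base is right-shifted by HIFC_ROOT_SQ_CI_ATTRIBUTE_ADDRESS_SHIFT + * and written to the chip attribute table through hifc_set_ci_table().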
+ * @v_root_info: root queue info + * @Return: 0 - success, negative - failure + */ +static unsigned int hifc_cfg_root_sq_ci_tbl( + struct hifc_root_info_s *v_root_info) +{ + int ret = 0; + unsigned int queue_index = 0; + dma_addr_t ci_dma_addr = 0; + struct hifc_sq_attr sq_ci_attr; + struct hifc_root_sq_info_s *sq_info = NULL; + void *handle = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return UNF_RETURN_ERROR); + + handle = ((struct hifc_hba_s *)v_root_info->phba)->hw_dev_handle; + + for (queue_index = 0; queue_index < v_root_info->sq_num; + queue_index++) { + /* Sync CI addr to hw, cfg attribute table format */ + memset(&sq_ci_attr, 0, sizeof(struct hifc_sq_attr)); + sq_info = (struct hifc_root_sq_info_s *)v_root_info->sq_info + + queue_index; + + sq_ci_attr.dma_attr_off = 0; + sq_ci_attr.pending_limit = 0; + sq_ci_attr.coalescing_time = 0; + sq_ci_attr.intr_en = HIFC_INT_ENABLE; + sq_ci_attr.intr_idx = sq_info->msix_entry_idx; + sq_ci_attr.l2nic_sqn = queue_index; + ci_dma_addr = HIFC_GET_ROOT_SQ_CI_ADDR(sq_info->ci_dma_addr, + queue_index); + sq_ci_attr.ci_dma_base = ci_dma_addr >> + HIFC_ROOT_SQ_CI_ATTRIBUTE_ADDRESS_SHIFT; + + /* Little endian used in UP */ + ret = hifc_set_ci_table(handle, sq_info->qid, &sq_ci_attr); + if (ret != 0) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[err]hifc_set_ci_table failed, return %d", + ret); + + return UNF_RETURN_ERROR; + } + } + + return RETURN_OK; +} + +/** + * hifc_alloc_root_sq_db - Allocate Doorbell buffer in root SQ + * @v_root_info: root queue struct info + * @Return: 0 - success, negative - failure + */ +static unsigned int hifc_alloc_root_sq_db(struct hifc_root_info_s *v_root_info) +{ + int ret = UNF_RETURN_ERROR_S32; + unsigned int q_index = 0; + unsigned int cfg_num = 0; + struct hifc_root_sq_info_s *sq_info = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return UNF_RETURN_ERROR); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)(root_info->phba); + + for (q_index = 0; q_index < root_info->sq_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + + ret = hifc_alloc_db_addr(hba->hw_dev_handle, + &sq_info->normal_db.virt_map_addr, + NULL); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[err]Allocate Root SQ DB address failed, SQ Index = %u, return %d", + q_index, ret); + + goto free_buff; + } + + if (!sq_info->normal_db.virt_map_addr) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[err]virt_map_addr is invalid, SQ Index = %u", + q_index); + + goto free_buff; + } + } + + return RETURN_OK; + +free_buff: + cfg_num = q_index; + + for (q_index = 0; q_index < cfg_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + + hifc_free_db_addr(hba->hw_dev_handle, + sq_info->normal_db.virt_map_addr, NULL); + sq_info->normal_db.virt_map_addr = NULL; + sq_info->normal_db.phy_addr = 0; + } + + return UNF_RETURN_ERROR; +} + +static void hifc_afree_root_sq_db(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + struct hifc_root_sq_info_s *sq_info = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_root_info, return); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)(root_info->phba); + + for (q_index = 0; q_index < root_info->sq_num; 
q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + + hifc_free_db_addr(hba->hw_dev_handle, + sq_info->normal_db.virt_map_addr, NULL); + sq_info->normal_db.virt_map_addr = NULL; + + sq_info->normal_db.phy_addr = 0; + } +} + +static void hifc_assemble_root_sq_ctx(unsigned int cmd_sq_num, + struct hifc_root_sq_info_s *v_sq_info, + void *v_buf) +{ + unsigned int q_index = 0; + unsigned long long ci_init_addr = 0; + struct hifc_root_sq_info_s *sq_info = NULL; + + struct hifc_qp_ctxt_header *cmdq_header = NULL; + struct hifc_sq_ctxt *sq_ctx = NULL; + struct hifc_sq_ctxt_block *sq_ctx_block = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_sq_info, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_buf, return); + + sq_info = v_sq_info; + sq_ctx_block = (struct hifc_sq_ctxt_block *)v_buf; + cmdq_header = &sq_ctx_block->cmdq_hdr; + + /* CMD header initialization */ + cmdq_header->num_queues = (unsigned short)cmd_sq_num; + cmdq_header->queue_type = HIFC_CMDQ_QUEUE_TYPE_SQ; + cmdq_header->addr_offset = HIFC_ROOT_SQ_CTX_OFFSET(sq_info->max_qnum, + sq_info->qid); + + /* CMD Header convert to big endian */ + hifc_cpu_to_big32(cmdq_header, sizeof(struct hifc_qp_ctxt_header)); + + for (q_index = 0; q_index < cmd_sq_num; q_index++) { + sq_info = v_sq_info + q_index; + sq_ctx = &sq_ctx_block->sq_ctx[q_index]; + memset(sq_ctx, 0, sizeof(struct hifc_sq_ctxt)); + + sq_ctx->sq_ctx_dw0.global_sq_id = sq_info->global_qpn; + sq_ctx->sq_ctx_dw0.ceq_num = 0; + sq_ctx->sq_ctx_dw1.owner = HIFC_ROOT_SQ_LOOP_OWNER; + + ci_init_addr = hifc_slq_get_first_pageaddr(sq_info->sq_handle); + + sq_ctx->sq_ctx_dw2.ci_wqe_page_addr_hi = + HIFC_CI_WQE_PAGE_HIGH_ADDR(ci_init_addr); + sq_ctx->ci_wqe_page_addr_lo = + HIFC_CI_WQE_PAGE_LOW_ADDR(ci_init_addr); + sq_ctx->sq_ctx_dw4.prefetch_min = + HIFC_ROOT_CTX_WQE_PREFETCH_MIN; + sq_ctx->sq_ctx_dw4.prefetch_max = + HIFC_ROOT_CTX_WQE_PREFETCH_MAX; + sq_ctx->sq_ctx_dw4.prefetch_cache_threshold = + HIFC_ROOT_CTX_WQE_PRERETCH_THRESHOLD; + sq_ctx->sq_ctx_dw5.prefetch_owner = HIFC_ROOT_SQ_LOOP_OWNER; + sq_ctx->sq_ctx_dw6.prefetch_ci_wqe_addr_hi = + HIFC_CI_WQE_PAGE_HIGH_ADDR(ci_init_addr); + sq_ctx->prefetch_ci_wqe_addr_lo = + HIFC_CI_WQE_PAGE_LOW_ADDR(ci_init_addr); + sq_ctx->sq_ctx_dw10.cla_addr_hi = + HIFC_CLA_HIGH_ADDR(sq_info->cla_addr); + sq_ctx->cla_addr_lo = HIFC_CLA_LOW_ADDR(sq_info->cla_addr); + + /* big-little endian convert */ + hifc_cpu_to_big32(sq_ctx, sizeof(struct hifc_sq_ctxt)); + } +} + +static unsigned int hifc_cfg_root_sq_ctx(unsigned int cmd_sq_num, + void *v_handle, + struct hifc_cmd_buf *v_chipif_cmd_buff) +{ + int ret = 0; + unsigned short buff_used_size = 0; + unsigned int time_out = 0xF0000000; + unsigned long long uc_return = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_handle, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_chipif_cmd_buff, + return UNF_RETURN_ERROR); + + UNF_REFERNCE_VAR(uc_return); + UNF_REFERNCE_VAR(time_out); + UNF_REFERNCE_VAR(ret); + + buff_used_size = (unsigned short)(sizeof(struct hifc_qp_ctxt_header) + + sizeof(struct hifc_sq_ctxt) * cmd_sq_num); + v_chipif_cmd_buff->size = buff_used_size; + + ret = hifc_cmdq_direct_resp(v_handle, + HIFC_ACK_TYPE_CMDQ, + HIFC_MOD_L2NIC, + HIFC_UCODE_CMD_MODIFY_QUEUE_CONTEXT, + v_chipif_cmd_buff, + (u64 *)&uc_return, + time_out); + if ((ret != RETURN_OK) || (uc_return != RETURN_OK)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]chipif_cmd_to_ucode_imm failed, uiret %d, ullUcRet %llu", + ret, 
uc_return); + + return UNF_RETURN_ERROR; + } + + return RETURN_OK; +} + +static unsigned int hifc_calc_cmd_sq_num(unsigned int remain_sq_num) +{ + unsigned int sq_num = 0; + + if (remain_sq_num < HIFC_ROOT_CFG_SQ_NUM_MAX) + sq_num = remain_sq_num; + else + sq_num = HIFC_ROOT_CFG_SQ_NUM_MAX; + + return sq_num; +} + +static unsigned int hifc_init_root_sq_ctx(struct hifc_root_info_s *v_root_info) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int cmd_sq_num = 0; + unsigned int remain_sq_num = 0; + struct hifc_hba_s *hba = NULL; + struct hifc_root_sq_info_s *sq_info = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_cmd_buf *chipif_cmd_buf = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return UNF_RETURN_ERROR); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)root_info->phba; + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info); + + chipif_cmd_buf = hifc_alloc_cmd_buf(hba->hw_dev_handle); + if (!chipif_cmd_buf) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]hifc_alloc_cmd_buf failed."); + + return ENOMEM; + } + + remain_sq_num = root_info->sq_num; + while (remain_sq_num > 0) { + cmd_sq_num = hifc_calc_cmd_sq_num(remain_sq_num); + remain_sq_num -= cmd_sq_num; + + /* Assemble root SQ context */ + hifc_assemble_root_sq_ctx(cmd_sq_num, sq_info, + chipif_cmd_buf->buf); + + /* Send via ucode */ + ret = hifc_cfg_root_sq_ctx(cmd_sq_num, hba->hw_dev_handle, + chipif_cmd_buf); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[err]hifc_cfg_root_sq_ctx failed, return %u", + ret); + break; + } + + sq_info = sq_info + cmd_sq_num; + } + + /* Release cmd buffer */ + hifc_free_cmd_buf(hba->hw_dev_handle, chipif_cmd_buf); + return ret; +} + +static unsigned int hifc_create_root_sqs(struct hifc_root_info_s *v_root_info) +{ + unsigned int ret = UNF_RETURN_ERROR; + /* 1. Allocate sqinfo */ + ret = hifc_alloc_root_sq_info(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]hifc_alloc_root_sq_info failed, return %u", + ret); + + return ret; + } + + /* 2. Initialize sqinfo */ + hifc_init_root_sq_base_info(v_root_info); + + /* 3. Apply SQ CI address */ + ret = hifc_alloc_root_sq_ci_addr(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]hifc_alloc_root_sq_ci_addr failed, return %u", + ret); + + goto free_sq_info; + } + + /* 4. Allocate SQ buffer */ + ret = hifc_alloc_root_sq_buff(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]hifc_alloc_root_sq_buff failed, return %u", + ret); + + goto free_sq_ci_addr; + } + + /* 5. Register SQ(s) interrupt */ + ret = hifc_alloc_root_sq_int(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]hifc_alloc_root_sq_int failed, return %u", + ret); + + goto free_root_sq_buff; + } + + /* 6. Configure CI address in SQ and interrupt number */ + ret = hifc_cfg_root_sq_ci_tbl(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_ERR, + "[warn]hifc_cfg_root_sq_ci_tbl failed, return %u", + ret); + + goto free_root_sq_int; + } + + /* 7. 
Allocate Doorbell buffer */ + ret = hifc_alloc_root_sq_db(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]hifc_alloc_root_sq_db failed, return %u", + ret); + + goto free_root_sq_int; + } + + /* 8. Initialize SQ context */ + ret = hifc_init_root_sq_ctx(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[warn]hifc_init_root_sq_ctx failed, return %u", + ret); + + goto free_db; + } + + return RETURN_OK; + +free_db: + hifc_afree_root_sq_db(v_root_info); + +free_root_sq_int: + hifc_free_root_sq_int(v_root_info); + +free_root_sq_buff: + hifc_free_root_sq_buff(v_root_info); + +free_sq_ci_addr: + hifc_free_root_sq_ci_addr(v_root_info); + +free_sq_info: + hifc_free_root_sq_info(v_root_info); + + return ret; +} + +static void hifc_destroy_root_sqs(struct hifc_root_info_s *v_root_info) +{ + /* Free DB resources */ + hifc_afree_root_sq_db(v_root_info); + + /* Free interrupt resources */ + hifc_free_root_sq_int(v_root_info); + + /* Free WQE buffers */ + hifc_free_root_sq_buff(v_root_info); + + /* Free CI address */ + hifc_free_root_sq_ci_addr(v_root_info); + + /* Free Root SQ struct */ + hifc_free_root_sq_info(v_root_info); +} + +static unsigned int hifc_alloc_root_rq_info( + struct hifc_root_info_s *v_root_info) +{ + unsigned int rq_info_size = 0; + struct hifc_root_rq_info_s *root_rq_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return UNF_RETURN_ERROR); + + rq_info_size = (unsigned int) + (sizeof(struct hifc_root_rq_info_s) * + v_root_info->rq_num); + root_rq_info = (struct hifc_root_rq_info_s *)kmalloc(rq_info_size, + GFP_ATOMIC); + if (!root_rq_info) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Allocate Root RQ(s) failed"); + + return UNF_RETURN_ERROR; + } + memset(root_rq_info, 0, rq_info_size); + + v_root_info->rq_info = root_rq_info; + + return RETURN_OK; +} + +static void hifc_free_root_rq_info(struct hifc_root_info_s *v_root_info) +{ + struct hifc_root_info_s *root_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_root_info, return); + + root_info = v_root_info; + kfree(root_info->rq_info); + root_info->rq_info = NULL; +} + +static void hifc_init_root_rq_basic_info(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + unsigned short global_base_qpn = 0; + unsigned short max_q_num = 0; + struct hifc_root_rq_info_s *rq_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_root_info, return); + + hba = (struct hifc_hba_s *)v_root_info->phba; + global_base_qpn = hifc_get_global_base_qpn(hba->hw_dev_handle); + max_q_num = hifc_func_max_qnum(hba->hw_dev_handle); + + for (q_index = 0; q_index < v_root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(v_root_info->rq_info) + + q_index; + rq_info->max_qnum = max_q_num; + rq_info->qid = (unsigned short)q_index; + rq_info->q_depth = HIFC_ROOT_RQ_DEPTH; + rq_info->wqe_bb_size = HIFC_ROOT_RQ_WQEBB; + rq_info->root_info = v_root_info; + rq_info->global_qpn = global_base_qpn + q_index; + rq_info->owner = HIFC_ROOT_RQ_LOOP_OWNER; + } +} + +static unsigned int hifc_alloc_root_rq_pi_addr( + struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + unsigned int pi_addr_size = 0; + unsigned int pi_addr_offset = 0; + struct hifc_hba_s *hba = NULL; + struct hifc_root_rq_info_s *rq_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return 
UNF_RETURN_ERROR); + + pi_addr_size = HIFC_ROOT_RQ_PI_TABLE_STEP_BYTE * v_root_info->rq_num; + hba = (struct hifc_hba_s *)v_root_info->phba; + + v_root_info->virt_rq_pi_table_buff = + dma_alloc_coherent(&hba->pci_dev->dev, pi_addr_size, + &v_root_info->rq_pi_table_dma, + GFP_KERNEL); + if (!v_root_info->virt_rq_pi_table_buff) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Allocate Root RQ PI table failed"); + + return UNF_RETURN_ERROR; + } + memset(v_root_info->virt_rq_pi_table_buff, 0, pi_addr_size); + v_root_info->rq_pi_table_size = pi_addr_size; + + for (q_index = 0; q_index < v_root_info->rq_num; q_index++) { + pi_addr_offset = q_index * HIFC_ROOT_RQ_PI_TABLE_STEP_BYTE; + rq_info = (struct hifc_root_rq_info_s *)(v_root_info->rq_info) + + q_index; + rq_info->pi_vir_addr = + (unsigned short *) + ((unsigned long long)v_root_info->virt_rq_pi_table_buff + + pi_addr_offset); + rq_info->pi_dma_addr = v_root_info->rq_pi_table_dma + + pi_addr_offset; + } + + return RETURN_OK; +} + +static void hifc_free_root_rq_pi_addr(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_hba_s *hba = NULL; + struct hifc_root_rq_info_s *rq_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, return); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)root_info->phba; + dma_free_coherent(&hba->pci_dev->dev, root_info->rq_pi_table_size, + root_info->virt_rq_pi_table_buff, + root_info->rq_pi_table_dma); + root_info->virt_rq_pi_table_buff = NULL; + root_info->rq_pi_table_dma = 0; + + for (q_index = 0; q_index < root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + q_index; + rq_info->pi_vir_addr = NULL; + rq_info->pi_dma_addr = 0; + } +} + +static unsigned int hifc_alloc_root_rq_buff( + struct hifc_root_info_s *v_root_info) +{ + int ret = 0; + unsigned int q_index = 0; + unsigned int back_q_num = 0; + struct hifc_root_rq_info_s *rq_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)(v_root_info->phba); + + for (q_index = 0; q_index < v_root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(v_root_info->rq_info) + + q_index; + + /* Wqe_Base_Size:32; Depth:2048; Page_Size:4096 */ + ret = hifc_slq_alloc(hba->hw_dev_handle, rq_info->wqe_bb_size, + rq_info->q_depth, (u16)PAGE_SIZE, + (u64 *)&rq_info->ci_cla_tbl_addr, + &rq_info->rq_handle); + if ((ret != 0) || (!rq_info->rq_handle) || + (rq_info->ci_cla_tbl_addr == 0)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[warn]slq_allocate Root RQ Buffer failed, RQ Index = %u, return %u", + q_index, ret); + + goto free_rq_buff; + } + } + + return RETURN_OK; + +free_rq_buff: + back_q_num = q_index; + + for (q_index = 0; q_index < back_q_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(v_root_info->rq_info) + + q_index; + hifc_slq_free(hba->hw_dev_handle, rq_info->rq_handle); + rq_info->rq_handle = NULL; + rq_info->ci_cla_tbl_addr = 0; + } + + return UNF_RETURN_ERROR; +} + +static void hifc_free_root_rq_buff(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + struct hifc_root_rq_info_s *rq_info = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_root_info, return); + + root_info = v_root_info; + hba = (struct hifc_hba_s 
*)(root_info->phba); + + for (q_index = 0; q_index < root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + q_index; + hifc_slq_free(hba->hw_dev_handle, rq_info->rq_handle); + rq_info->rq_handle = NULL; + rq_info->ci_cla_tbl_addr = 0; + } +} + +irqreturn_t hifc_root_rq_irq(int v_irq, void *v_rq_info) +{ + HIFC_CHECK(INVALID_VALUE32, NULL != v_rq_info, return IRQ_NONE); + + tasklet_schedule(&((struct hifc_root_rq_info_s *)v_rq_info)->tasklet); + + return IRQ_HANDLED; +} + +static unsigned int hifc_alloc_root_rq_int(struct hifc_root_info_s *v_root_info) +{ + int ret = UNF_RETURN_ERROR_S32; + unsigned int q_index = 0; + unsigned int cfg_num = 0; + unsigned short act_num = 0; + struct irq_info irq_info; + struct hifc_root_rq_info_s *rq_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)(v_root_info->phba); + + for (q_index = 0; q_index < v_root_info->rq_num; q_index++) { + ret = hifc_alloc_irqs(hba->hw_dev_handle, SERVICE_T_FC, + HIFC_INT_NUM_PER_QUEUE, &irq_info, + &act_num); + if ((ret != RETURN_OK) || (act_num != HIFC_INT_NUM_PER_QUEUE)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[warn]cfg_alloc_irqs Root RQ irq failed, RQ Index = %u, return %d", + q_index, ret); + + goto free_irq; + } + + if (irq_info.msix_entry_idx >= HIFC_ROOT_Q_INT_ID_MAX) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[warn]cfg_alloc_irqs Root RQ irq id exceed 1024, msix_entry_idx %u", + irq_info.msix_entry_idx); + + hifc_free_irq(hba->hw_dev_handle, SERVICE_T_FC, + irq_info.irq_id); + goto free_irq; + } + + rq_info = (struct hifc_root_rq_info_s *)(v_root_info->rq_info) + + q_index; + rq_info->irq_id = (unsigned int)(irq_info.irq_id); + rq_info->msix_entry_idx = (unsigned short) + (irq_info.msix_entry_idx); + + ret = snprintf(rq_info->irq_name, HIFC_IRQ_NAME_MAX - 1, + "Root RQ %u", q_index); + UNF_FUNCTION_RETURN_CHECK(ret, HIFC_IRQ_NAME_MAX - 1); + + tasklet_init(&rq_info->tasklet, hifc_process_root_rqe, + (unsigned long)rq_info); + + ret = request_irq(rq_info->irq_id, hifc_root_rq_irq, 0UL, + rq_info->irq_name, rq_info); + hifc_set_msix_state(hba->hw_dev_handle, rq_info->msix_entry_idx, + HIFC_MSIX_ENABLE); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[warn]UNF_OS_REQUEST_IRQ Root RQ irq failed, RQ Index = %u, return %d", + q_index, ret); + + hifc_free_irq(hba->hw_dev_handle, SERVICE_T_FC, + rq_info->irq_id); + memset(rq_info->irq_name, 0, HIFC_IRQ_NAME_MAX); + rq_info->irq_id = 0; + rq_info->msix_entry_idx = 0; + goto free_irq; + } + } + + return RETURN_OK; + +free_irq: + cfg_num = q_index; + + for (q_index = 0; q_index < cfg_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(v_root_info->rq_info) + + q_index; + + free_irq(rq_info->irq_id, rq_info); + tasklet_kill(&rq_info->tasklet); + hifc_free_irq(hba->hw_dev_handle, SERVICE_T_FC, + rq_info->irq_id); + rq_info->irq_id = 0; + rq_info->msix_entry_idx = 0; + } + + return UNF_RETURN_ERROR; +} + +static void hifc_free_root_rq_int(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + struct hifc_root_rq_info_s *rq_info = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_root_info, return); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)(root_info->phba); + + for (q_index = 0; q_index < 
root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + q_index; + hifc_set_msix_state(hba->hw_dev_handle, rq_info->msix_entry_idx, + HIFC_MSIX_DISABLE); + free_irq(rq_info->irq_id, rq_info); + tasklet_kill(&rq_info->tasklet); + hifc_free_irq(hba->hw_dev_handle, SERVICE_T_FC, + rq_info->irq_id); + rq_info->irq_id = 0; + rq_info->msix_entry_idx = 0; + } +} + +static unsigned int hifc_alloc_root_rq_completion_buff( + struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + unsigned int back_index = 0; + unsigned int rqc_buff_size = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_root_rq_info_s *rq_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return UNF_RETURN_ERROR); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)(root_info->phba); + + for (q_index = 0; q_index < root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + q_index; + + /* 2048 * Size */ + rqc_buff_size = rq_info->q_depth * + sizeof(struct hifc_root_rq_complet_info_s); + rq_info->rq_completion_buff = dma_alloc_coherent( + &hba->pci_dev->dev, + rqc_buff_size, + &rq_info->rq_completion_dma, + GFP_KERNEL); + if (!rq_info->rq_completion_buff) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[warn]Allocate Root RQ completion buffer failed, RQ Index = %u.", + q_index); + + goto free_buff; + } + memset(rq_info->rq_completion_buff, 0, rqc_buff_size); + rq_info->rqc_buff_size = rqc_buff_size; + } + + return RETURN_OK; + +free_buff: + + back_index = q_index; + + for (q_index = 0; q_index < back_index; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + q_index; + dma_free_coherent(&hba->pci_dev->dev, rq_info->rqc_buff_size, + rq_info->rq_completion_buff, + rq_info->rq_completion_dma); + rq_info->rq_completion_buff = NULL; + rq_info->rq_completion_dma = 0; + rq_info->rqc_buff_size = 0; + } + + return UNF_RETURN_ERROR; +} + +static void hifc_free_root_rq_completion_buff( + struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_root_rq_info_s *rq_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, return); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)(root_info->phba); + + for (q_index = 0; q_index < root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + q_index; + dma_free_coherent(&hba->pci_dev->dev, rq_info->rqc_buff_size, + rq_info->rq_completion_buff, + rq_info->rq_completion_dma); + rq_info->rq_completion_buff = NULL; + rq_info->rq_completion_dma = 0; + rq_info->rqc_buff_size = 0; + } +} + +static unsigned int hifc_alloc_root_rq_rcv_buff( + struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + unsigned int back_index = 0; + unsigned int rq_rcv_buff_size = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_root_rq_info_s *rq_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return UNF_RETURN_ERROR); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)(root_info->phba); + + for (q_index = 0; q_index < root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + q_index; + + /* Depth(2048) * Buff_Size(2048) */ + rq_rcv_buff_size = rq_info->q_depth * + HIFC_ROOT_RQ_RECV_BUFF_SIZE; + 
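/* one coherent DMA area per RQ holds all q_depth receive buffers back to back */ +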
rq_info->rq_rcv_buff = dma_alloc_coherent(&hba->pci_dev->dev, + rq_rcv_buff_size, + &rq_info->rq_rcv_dma, + GFP_KERNEL); + if (!rq_info->rq_rcv_buff) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[warn]Allocate Root RQ receive buffer failed, RQ index = %u", + q_index); + + goto free_buff; + } + memset(rq_info->rq_rcv_buff, 0, rq_rcv_buff_size); + rq_info->rq_rcv_buff_size = rq_rcv_buff_size; + } + + return RETURN_OK; + +free_buff: + + back_index = q_index; + + for (q_index = 0; q_index < back_index; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + q_index; + dma_free_coherent(&hba->pci_dev->dev, + rq_info->rq_rcv_buff_size, + rq_info->rq_rcv_buff, rq_info->rq_rcv_dma); + rq_info->rq_rcv_buff = NULL; + rq_info->rq_rcv_dma = 0; + rq_info->rq_rcv_buff_size = 0; + } + + return UNF_RETURN_ERROR; +} + +static void hifc_free_root_rq_rcv_buff(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_root_rq_info_s *rq_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_root_info, return); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)(root_info->phba); + + for (q_index = 0; q_index < root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + q_index; + dma_free_coherent(&hba->pci_dev->dev, + rq_info->rq_rcv_buff_size, + rq_info->rq_rcv_buff, rq_info->rq_rcv_dma); + rq_info->rq_rcv_buff = NULL; + rq_info->rq_rcv_dma = 0; + rq_info->rq_rcv_buff_size = 0; + } +} + +static void hifc_init_root_rq_wqe(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + unsigned short wqe_index = 0; + unsigned int dma_offset = 0; + dma_addr_t rq_completion_dma = 0; + dma_addr_t rq_rcv_dma = 0; + struct nic_rq_wqe *rq_wqe = NULL; + struct nic_wqe_ctrl_sec *wqe_ctrl = NULL; + struct nic_rq_sge_sec *buff_sge = NULL; + struct nic_rq_bd_sec *rq_buff_bd = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_root_rq_info_s *rq_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, return); + + root_info = v_root_info; + + for (q_index = 0; q_index < root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + q_index; + + for (wqe_index = 0; wqe_index < rq_info->q_depth; wqe_index++) { + rq_wqe = (struct nic_rq_wqe *) + hifc_slq_get_addr(rq_info->rq_handle, + wqe_index); + if (!rq_wqe) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_REG_ATT, UNF_ERR, "[err]Get Rq Wqe failed"); + + return; + } + memset(rq_wqe, 0, sizeof(struct nic_rq_wqe)); + + /* Initialize ctrl section */ + wqe_ctrl = &rq_wqe->rq_wqe_ctrl_sec; + wqe_ctrl->bs.owner = HIFC_ROOT_RQ_LOOP_OWNER; + /* control section = 8 bytes */ + wqe_ctrl->bs.ctrl_sec_len = 1; + /* complete section = 16B for SGE */ + wqe_ctrl->bs.completion_sec_len = 2; + /* bd section = 8B */ + wqe_ctrl->bs.buf_desc_sec_len = 1; + wqe_ctrl->bs.cf = 1; /* use SGE */ + + /* Fill wqe receive information section */ + buff_sge = &rq_wqe->rx_sge; + dma_offset = sizeof(struct hifc_root_rq_complet_info_s) + * wqe_index; + rq_completion_dma = rq_info->rq_completion_dma + + dma_offset; + buff_sge->wb_addr_low = + HIFC_LOW_32_BITS(rq_completion_dma); + buff_sge->wb_addr_high = + HIFC_HIGH_32_BITS(rq_completion_dma); + buff_sge->bs0.length = + sizeof(struct hifc_root_rq_complet_info_s); + + /* Fill db */ + rq_buff_bd = &rq_wqe->pkt_buf_addr; + dma_offset = HIFC_ROOT_RQ_RECV_BUFF_SIZE * wqe_index; + 
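/* point this WQE's buffer descriptor at its slice of the receive DMA area */ +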
rq_rcv_dma = rq_info->rq_rcv_dma + dma_offset; + rq_buff_bd->pkt_buf_addr_high = + HIFC_HIGH_32_BITS(rq_rcv_dma); + rq_buff_bd->pkt_buf_addr_low = + HIFC_LOW_32_BITS(rq_rcv_dma); + + /* big-little endian convert */ + hifc_cpu_to_big32((void *)rq_wqe, + sizeof(struct nic_rq_wqe)); + } + + rq_info->pi = rq_info->q_depth - 1; + rq_info->owner = HIFC_ROOT_RQ_LOOP_OWNER; + } +} + +static unsigned int hifc_calc_cmd_rq_num(unsigned int remain_rq_num) +{ + unsigned int ret = 0; + + if (remain_rq_num < HIFC_ROOT_CFG_RQ_NUM_MAX) + ret = remain_rq_num; + else + ret = HIFC_ROOT_CFG_RQ_NUM_MAX; + + return ret; +} + +static void hifc_assemble_root_rq_ctx(unsigned int cmd_rq_num, + struct hifc_root_rq_info_s *v_rq_info, + void *v_buf) +{ + unsigned int q_index = 0; + unsigned long long ci_init_addr = 0; + struct hifc_root_rq_info_s *rq_info = NULL; + struct hifc_qp_ctxt_header *cmdq_header = NULL; + struct hifc_rq_ctxt *rq_ctx = NULL; + struct hifc_rq_ctxt_block *rq_ctx_block = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_rq_info, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_buf, return); + + rq_info = v_rq_info; + rq_ctx_block = (struct hifc_rq_ctxt_block *)v_buf; + cmdq_header = &rq_ctx_block->cmdq_hdr; + + /* cmdheader initialization */ + cmdq_header->num_queues = (unsigned short)cmd_rq_num; + cmdq_header->queue_type = HIFC_CMDQ_QUEUE_TYPE_RQ; + cmdq_header->addr_offset = HIFC_ROOT_RQ_CTX_OFFSET(rq_info->max_qnum, + rq_info->qid); + + /* big-little endian convert */ + hifc_cpu_to_big32(cmdq_header, sizeof(struct hifc_qp_ctxt_header)); + + for (q_index = 0; q_index < cmd_rq_num; q_index++) { + rq_info = v_rq_info + q_index; + rq_ctx = &rq_ctx_block->rq_ctx[q_index]; + memset(rq_ctx, 0, sizeof(struct hifc_rq_ctxt)); + + rq_ctx->pi_gpa_hi = HIFC_HIGH_32_BITS(rq_info->pi_dma_addr); + rq_ctx->pi_gpa_lo = HIFC_LOW_32_BITS(rq_info->pi_dma_addr); + rq_ctx->bs2.ci = 0; + rq_ctx->bs0.pi = 0; + + rq_ctx->bs6.ci_cla_tbl_addr_hi = + HIFC_CLA_HIGH_ADDR(rq_info->ci_cla_tbl_addr); + rq_ctx->ci_cla_tbl_addr_lo = + HIFC_CLA_LOW_ADDR(rq_info->ci_cla_tbl_addr); + + ci_init_addr = hifc_slq_get_first_pageaddr(rq_info->rq_handle); + rq_ctx->bs2.ci_wqe_page_addr_hi = + HIFC_CI_WQE_PAGE_HIGH_ADDR(ci_init_addr); + rq_ctx->ci_wqe_page_addr_lo = + HIFC_CI_WQE_PAGE_LOW_ADDR(ci_init_addr); + + rq_ctx->bs.ceq_en = 0; + rq_ctx->bs.owner = HIFC_ROOT_RQ_LOOP_OWNER; + rq_ctx->bs0.int_num = rq_info->msix_entry_idx; + + rq_ctx->bs3.prefetch_cache_threshold = + HIFC_ROOT_CTX_WQE_PRERETCH_THRESHOLD; + rq_ctx->bs3.prefetch_max = HIFC_ROOT_CTX_WQE_PREFETCH_MAX; + rq_ctx->bs3.prefetch_min = HIFC_ROOT_CTX_WQE_PREFETCH_MIN; + rq_ctx->bs5.prefetch_ci_wqe_page_addr_hi = + rq_ctx->bs2.ci_wqe_page_addr_hi; + rq_ctx->prefetch_ci_wqe_page_addr_lo = + rq_ctx->ci_wqe_page_addr_lo; + + /* big-little endian convert */ + hifc_cpu_to_big32(rq_ctx, sizeof(struct hifc_rq_ctxt)); + } +} + +static unsigned int hifc_cfg_root_rq_ctx(unsigned int cmd_rq_num, + void *v_handle, + struct hifc_cmd_buf *v_chipif_cmd_buff) +{ + int ret = 0; + unsigned short buff_used_size = 0; + unsigned int time_out = 0xF0000000; + unsigned long long uc_return = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_handle, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_chipif_cmd_buff, + return UNF_RETURN_ERROR); + + UNF_REFERNCE_VAR(uc_return); + UNF_REFERNCE_VAR(time_out); + UNF_REFERNCE_VAR(ret); + + buff_used_size = (unsigned short)(sizeof(struct hifc_qp_ctxt_header) + + sizeof(struct hifc_rq_ctxt) * cmd_rq_num); + 
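/* cmd buffer carries one context header followed by cmd_rq_num RQ contexts */ +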
v_chipif_cmd_buff->size = buff_used_size; + + ret = hifc_cmdq_direct_resp(v_handle, + HIFC_ACK_TYPE_CMDQ, + HIFC_MOD_L2NIC, + HIFC_UCODE_CMD_MODIFY_QUEUE_CONTEXT, + v_chipif_cmd_buff, + (u64 *)&uc_return, + time_out); + if ((ret != RETURN_OK) || (uc_return != RETURN_OK)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]hifc_cmdq_direct_resp failed, uiret %d, ullUcRet %llu", + ret, uc_return); + + return UNF_RETURN_ERROR; + } + + return RETURN_OK; +} + +static unsigned int hifc_init_root_rq_ctx( + void *v_handle, + struct hifc_root_info_s *v_root_info) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int cmd_rq_num = 0; + unsigned int remain_rq_num = 0; + struct hifc_root_rq_info_s *rq_info = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_cmd_buf *chipif_cmd_buf = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_handle, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return UNF_RETURN_ERROR); + + root_info = v_root_info; + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info); + + chipif_cmd_buf = hifc_alloc_cmd_buf(v_handle); + if (!chipif_cmd_buf) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]hifc_alloc_cmd_buf failed"); + + return ENOMEM; + } + + remain_rq_num = root_info->rq_num; + while (remain_rq_num > 0) { + cmd_rq_num = hifc_calc_cmd_rq_num(remain_rq_num); + remain_rq_num -= cmd_rq_num; + + /* Assemble cmd buffer context */ + hifc_assemble_root_rq_ctx(cmd_rq_num, rq_info, + chipif_cmd_buf->buf); + + /* Send via ucode */ + ret = hifc_cfg_root_rq_ctx(cmd_rq_num, v_handle, + chipif_cmd_buf); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[err]hifc_cfg_root_rq_ctx failed, return %u", + ret); + break; + } + + rq_info = rq_info + cmd_rq_num; + } + + /* Free cmd buffer */ + hifc_free_cmd_buf(v_handle, chipif_cmd_buf); + + return ret; +} + +static void hifc_update_root_rq_pi(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + struct hifc_root_rq_info_s *rq_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_root_info, return); + + for (q_index = 0; q_index < v_root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(v_root_info->rq_info) + + q_index; + + wmb(); + *rq_info->pi_vir_addr = cpu_to_be16(rq_info->pi); + } +} + +static unsigned int hifc_create_root_rqs(struct hifc_root_info_s *v_root_info) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)v_root_info->phba; + + /* Allocate RQ struct */ + ret = hifc_alloc_root_rq_info(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]hifc_alloc_root_rq_info failed"); + + return ret; + } + + /* Initialize RQ basic information */ + hifc_init_root_rq_basic_info(v_root_info); + + /* Apply RQ(s) PI GPA */ + ret = hifc_alloc_root_rq_pi_addr(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]hifc_alloc_root_rq_pi_addr failed, return %u", + ret); + + goto free_root_rq_info; + } + + /* Apply RQ's buffer */ + ret = hifc_alloc_root_rq_buff(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]hifc_alloc_root_rq_buff failed, return %u", + ret); + + goto free_rq_pi_addr; + } + + /* Apply completion buffer */ + ret = 
hifc_alloc_root_rq_completion_buff(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]hifc_alloc_root_rq_completion_buff failed, return %u", + ret); + + goto free_root_rq_buff; + } + + /* Allocate root RQ receiving buffer */ + ret = hifc_alloc_root_rq_rcv_buff(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]hifc_alloc_root_rq_rcv_buff failed, return %u", + ret); + + goto free_root_rq_completion_buff; + } + + /* Initialize RQ WQE struct */ + hifc_init_root_rq_wqe(v_root_info); + + /* Apply RQ's interrupt resources */ + ret = hifc_alloc_root_rq_int(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]hifc_alloc_root_rq_int failed, return %u", + ret); + + goto free_root_rq_receive_buff; + } + + /* Initialize RQ context */ + ret = hifc_init_root_rq_ctx(hba->hw_dev_handle, v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]hifc_init_root_rq_ctx Failed, return %u", ret); + + goto free_root_rq_int; + } + + /* Update SQ PI */ + hifc_update_root_rq_pi(v_root_info); + return RETURN_OK; + +free_root_rq_int: + hifc_free_root_rq_int(v_root_info); + +free_root_rq_receive_buff: + hifc_free_root_rq_rcv_buff(v_root_info); + +free_root_rq_completion_buff: + hifc_free_root_rq_completion_buff(v_root_info); + +free_root_rq_buff: + hifc_free_root_rq_buff(v_root_info); + +free_rq_pi_addr: + hifc_free_root_rq_pi_addr(v_root_info); + +free_root_rq_info: + hifc_free_root_rq_info(v_root_info); + + return ret; +} + +static void hifc_destroy_root_rqs(struct hifc_root_info_s *v_root_info) +{ + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, return); + + hifc_free_root_rq_rcv_buff(v_root_info); + + hifc_free_root_rq_completion_buff(v_root_info); + + hifc_free_root_rq_int(v_root_info); + + hifc_free_root_rq_buff(v_root_info); + + hifc_free_root_rq_pi_addr(v_root_info); + + hifc_free_root_rq_info(v_root_info); +} + +static unsigned int hifc_cfg_root_ctx(struct hifc_root_info_s *v_root_info) +{ + int ret; + struct hifc_hba_s *hba = NULL; + struct hifc_root_info_s *root_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return UNF_RETURN_ERROR); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)root_info->phba; + + ret = hifc_set_root_ctxt(hba->hw_dev_handle, HIFC_ROOT_RQ_DEPTH, + HIFC_ROOT_SQ_DEPTH, + HIFC_ROOT_RQ_RECV_BUFF_SIZE); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]chipif_func_vat_info_set failed, return [%d]", + ret); + + return UNF_RETURN_ERROR; + } + + return RETURN_OK; +} + +static void hifc_init_root_basic_info(struct hifc_hba_s *v_hba) +{ + struct hifc_root_info_s *root_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_hba, return); + + root_info = &v_hba->root_info; + memset(root_info, 0, sizeof(struct hifc_root_info_s)); + + root_info->phba = (void *)v_hba; + + root_info->rq_num = HIFC_ROOT_RQ_NUM; + root_info->sq_num = HIFC_ROOT_SQ_NUM; +} + +unsigned int hifc_create_root_queues(void *v_hba) +{ + unsigned int ret = UNF_RETURN_ERROR; + int slq_ret = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, + return UNF_RETURN_ERROR); + + /* Initialize basic root information */ + hba = (struct hifc_hba_s *)v_hba; + hifc_init_root_basic_info(hba); + + root_info = &hba->root_info; + 
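+ /* bring-up order: slq init, root SQ(s), root RQ(s), root context; each failure path unwinds what was already set up */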
+ /* slq Init */ + slq_ret = hifc_slq_init(hba->hw_dev_handle, + (int)(root_info->sq_num + root_info->rq_num)); + if (slq_ret) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]hifc_slq_init init failed, ret:0x%x", slq_ret); + + return UNF_RETURN_ERROR; + } + + /* Create SQ, and send cmdq to ucode for initialization of SQ context */ + ret = hifc_create_root_sqs(root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]hifc_create_root_sqs failed, return [%u]", + ret); + + hifc_slq_uninit(hba->hw_dev_handle); + return ret; + } + + /* Create RQ */ + ret = hifc_create_root_rqs(root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]hifc_create_root_rqs failed, return [%u]", + ret); + + hifc_destroy_root_sqs(root_info); + hifc_slq_uninit(hba->hw_dev_handle); + return ret; + } + + /* Configure root context */ + ret = hifc_cfg_root_ctx(root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]hifc_cfg_root_ctx failed, return [%u]", ret); + + hifc_destroy_root_rqs(root_info); + hifc_destroy_root_sqs(root_info); + hifc_slq_uninit(hba->hw_dev_handle); + return ret; + } + + return RETURN_OK; +} + +void hifc_destroy_root_queues(void *v_hba) +{ + struct hifc_hba_s *hba = NULL; + struct hifc_root_info_s *root_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return); + + hba = (struct hifc_hba_s *)v_hba; + root_info = &hba->root_info; + + hifc_destroy_root_rqs(root_info); + hifc_destroy_root_sqs(root_info); + + hifc_slq_uninit(hba->hw_dev_handle); +} + +static void hifc_ring_root_sq_db(struct hifc_hba_s *v_hba, + struct hifc_root_sq_info_s *v_sq_info) +{ + struct nic_tx_doorbell db; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_sq_info, return); + + memset(&db, 0, sizeof(struct nic_tx_doorbell)); + + db.bs0.srv_type = HIFC_DOORBELL_SQ_TYPE; + db.bs0.queue_id = v_sq_info->qid; + db.bs0.pi_high = v_sq_info->pi >> HIFC_DOORBELL_SQ_PI_HIGH_BITS_SHIFT; + db.bs0.cos = 0; + + db.dw0 = cpu_to_be32(db.dw0); + wmb(); + + *((unsigned long long *)(v_sq_info->normal_db.virt_map_addr) + + (v_sq_info->pi & HIFC_DOORBELL_SQ_PI_LOW_BITS_MASK)) = + *(unsigned long long *)&db; +} + +static int hifc_root_sq_is_empty(struct hifc_root_sq_info_s *v_sq_info) +{ + unsigned short cur_pi = 0; + unsigned short cur_ci = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_sq_info, return UNF_TRUE); + + /* pi == ci empty, pi-ci = 1 full */ + cur_pi = v_sq_info->pi; + cur_ci = *v_sq_info->ci_addr; + cur_ci = be16_to_cpu(cur_ci); + + if (cur_pi == cur_ci) + return UNF_TRUE; + + return UNF_FALSE; +} + +static int hifc_root_sq_is_full(struct hifc_root_sq_info_s *v_sq_info) +{ + unsigned short cur_pi = 0; + unsigned short cur_ci = 0; + unsigned short valid_wqe_num = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_sq_info, return UNF_TRUE); + + /* pi == ci empty, pi-ci = 1 full */ + cur_pi = v_sq_info->pi; + cur_ci = *v_sq_info->ci_addr; + cur_ci = be16_to_cpu(cur_ci); + valid_wqe_num = v_sq_info->q_depth - 1; + + if ((valid_wqe_num == cur_pi - cur_ci) || + (valid_wqe_num == v_sq_info->q_depth + cur_pi - cur_ci)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Root SQ[%u] is full, PI %u, CI %u", + v_sq_info->global_qpn, cur_pi, cur_ci); + return UNF_TRUE; + } + + return UNF_FALSE; +} + +static void hifc_build_root_wqe_qsf(void *v_qsf) +{ + struct hifc_root_qsf_s *root_qsf = NULL; + + root_qsf = (struct 
hifc_root_qsf_s *)v_qsf; + + /* route to ucode */ + /* MSS range 0x50~0x3E00 */ + root_qsf->route_to_ucode = 1; + root_qsf->mss = 0x3E00; +} + +unsigned int hifc_root_sq_enqueue(void *v_hba, struct hifc_root_sqe_s *v_sqe) +{ + unsigned char task_type = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_root_sq_info_s *sq_info = NULL; + struct hifc_hba_s *hba = NULL; + struct hifc_root_sqe_s *sqe = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_sqe, + return UNF_RETURN_ERROR); + + /* Root use one sq by default */ + hba = (struct hifc_hba_s *)v_hba; + root_info = &hba->root_info; + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info); + task_type = (unsigned char)v_sqe->task_section.fc_dw0.task_type; + + spin_lock_irqsave(&sq_info->root_sq_spin_lock, flag); + + /* Check flush state */ + if (sq_info->in_flush == UNF_TRUE) { + HIFC_ERR_IO_STAT(hba, task_type); + HIFC_HBA_STAT(hba, HIFC_STAT_ROOT_IO_FLUSHED); + spin_unlock_irqrestore(&sq_info->root_sq_spin_lock, flag); + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, "[err]Root SQ is flushing"); + return UNF_RETURN_ERROR; + } + + /* Check root SQ whether is full */ + if (hifc_root_sq_is_full(sq_info) == UNF_TRUE) { + HIFC_ERR_IO_STAT(hba, task_type); + HIFC_HBA_STAT(hba, HIFC_STAT_ROOT_SQ_FULL); + spin_unlock_irqrestore(&sq_info->root_sq_spin_lock, flag); + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, "[err]Root SQ is full"); + return UNF_RETURN_ERROR; + } + + if (unlikely(!hba->heart_status)) { + spin_unlock_irqrestore(&sq_info->root_sq_spin_lock, flag); + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[err]Heart status is false"); + return UNF_RETURN_ERROR; + } + /* Get available wqe */ + sqe = (struct hifc_root_sqe_s *)hifc_slq_get_addr(sq_info->sq_handle, + sq_info->pi); + if (!sqe) { + HIFC_ERR_IO_STAT(hba, task_type); + spin_unlock_irqrestore(&sq_info->root_sq_spin_lock, flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Get root SQ Sqe failed, PI %u", sq_info->pi); + + return UNF_RETURN_ERROR; + } + + hifc_build_root_wqe_qsf((void *)(&v_sqe->ctrl_section.qsf)); + HIFC_IO_STAT(hba, task_type); + hifc_convert_root_wqe_to_big_endian(v_sqe); + memcpy(sqe, v_sqe, sizeof(struct hifc_root_sqe_s)); + + /* Update PI and Obit */ + hifc_update_producer_info(sq_info->q_depth, &sq_info->pi, + &sq_info->owner); + + /* doorbell */ + hifc_ring_root_sq_db(hba, sq_info); + + spin_unlock_irqrestore(&sq_info->root_sq_spin_lock, flag); + UNF_REFERNCE_VAR(task_type); + + return RETURN_OK; +} + +static int hifc_root_rqe_done( + struct hifc_root_rq_complet_info_s *v_completion_info) +{ + if (v_completion_info->done != 0) + return UNF_TRUE; + + return UNF_FALSE; +} + +static void hifc_clear_root_rqe_done( + struct hifc_root_rq_complet_info_s *v_completion_info) +{ + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_completion_info, return); + + v_completion_info->done = 0; +} + +static int hifc_check_root_rqe_type( + struct hifc_root_rq_complet_info_s *v_completion_info) +{ + if (v_completion_info->fc_pkt != 0) + return UNF_TRUE; + + return UNF_FALSE; +} + +void hifc_update_root_rq_info(struct hifc_root_rq_info_s *v_rq_info, + unsigned short v_rcv_buf_num) +{ + unsigned short loop = 0; + struct hifc_root_rq_complet_info_s completion_info = { 0 }; + struct hifc_root_rq_complet_info_s *complet_info = NULL; + + for (loop = 0; loop < v_rcv_buf_num; loop++) { + /* 
Obtain CompletionInfo */ + complet_info = (struct hifc_root_rq_complet_info_s *) + (v_rq_info->rq_completion_buff) + v_rq_info->ci; + + /* big-little endian convert */ + memcpy(&completion_info, complet_info, sizeof(completion_info)); + hifc_big_to_cpu32(&completion_info, sizeof(completion_info)); + + /* Clear done bit */ + hifc_clear_root_rqe_done(&completion_info); + + /* Write back done bit */ + hifc_cpu_to_big32(&completion_info, sizeof(completion_info)); + memcpy(complet_info, &completion_info, sizeof(completion_info)); + + /* Update Obit and PI in RQE */ + hifc_update_producer_info(v_rq_info->q_depth, &v_rq_info->pi, + &v_rq_info->owner); + + v_rq_info->ci = ((v_rq_info->ci + 1) < v_rq_info->q_depth) ? + (v_rq_info->ci + 1) : 0; + + wmb(); + *v_rq_info->pi_vir_addr = cpu_to_be16(v_rq_info->pi); + } +} + +void hifc_root_rqe_analysis( + struct hifc_hba_s *v_hba, + struct hifc_root_rq_info_s *v_rq_info, + struct hifc_root_rq_complet_info_s *v_completion_info, + unsigned short v_rcv_buf_num) +{ + unsigned int ret = UNF_RETURN_ERROR; + + if (v_completion_info->sts_only) { + /* case1: receive ElsRsp Status */ + if (v_completion_info->status == RETURN_OK) + ret = hifc_rq_rcv_els_rsp_sts(v_hba, v_completion_info); + else + ret = hifc_rq_rcv_srv_err(v_hba, v_completion_info); + } else { + ret = hifc_rcv_service_frame_from_rq(v_hba, v_rq_info, + v_completion_info, + v_rcv_buf_num); + } + + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[warn]Up Layer Process RQE Frame or Status abnormal(0x%x)", + ret); + } +} + +void hifc_process_root_rqe(unsigned long v_rq_info) +{ + int rqe_done = UNF_FALSE; + int rqe_valid = UNF_FALSE; + unsigned short rcv_buf_num = 0; + unsigned int index = 0; + struct nic_rq_wqe *rq_wqe = NULL; + struct hifc_hba_s *hba = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_root_rq_complet_info_s *complet_info = NULL; + struct hifc_root_rq_complet_info_s completion_info = { 0 }; + + struct hifc_root_rq_info_s *rq_info = + (struct hifc_root_rq_info_s *)v_rq_info; + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, rq_info, return); + + root_info = (struct hifc_root_info_s *)(rq_info->root_info); + hba = (struct hifc_hba_s *)(root_info->phba); + + for (index = 0; index < HIFC_RQE_MAX_PROCESS_NUM_PER_INTR; index++) { + /* Obtain RQE */ + rq_wqe = (struct nic_rq_wqe *) + hifc_slq_get_addr(rq_info->rq_handle, rq_info->ci); + if (!rq_wqe) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, "[err]Get Rqe failed"); + break; + } + + /* Check whether to process RQE */ + complet_info = (struct hifc_root_rq_complet_info_s *) + (rq_info->rq_completion_buff) + rq_info->ci; + + memcpy(&completion_info, complet_info, sizeof(completion_info)); + hifc_big_to_cpu32(&completion_info, sizeof(completion_info)); + + rqe_done = hifc_root_rqe_done(&completion_info); + if (rqe_done != UNF_TRUE) { + atomic_set(&rq_info->flush_state, + HIFC_QUEUE_FLUSH_DONE); + break; + } + + rmb(); + + rcv_buf_num = (completion_info.buf_length + + HIFC_ROOT_RQ_RECV_BUFF_SIZE - 1) / + HIFC_ROOT_RQ_RECV_BUFF_SIZE; + if (rcv_buf_num == 0) + rcv_buf_num = 1; + + rqe_valid = hifc_check_root_rqe_type(&completion_info); + if (rqe_valid == UNF_TRUE) { + hifc_root_rqe_analysis(hba, rq_info, &completion_info, + rcv_buf_num); + } else { + /* Receive illegal frames and record */ + HIFC_IO_STAT(hba, HIFCOE_TASK_T_BUTT); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_IO_ATT, + UNF_WARN, + "[warn]Port(0x%x) Receive an unsupported frame, drop it", + 
hba->port_cfg.port_id); + } + + hifc_update_root_rq_info(rq_info, rcv_buf_num); + } + + if (index == HIFC_RQE_MAX_PROCESS_NUM_PER_INTR) + tasklet_schedule(&rq_info->tasklet); +} + +static inline int hifc_is_scq_link_wqe(struct hifc_scq_info_s *v_scq_info) +{ + unsigned short custom_scqe_num = 0; + + custom_scqe_num = v_scq_info->ci + 1; + + if ((custom_scqe_num % v_scq_info->wqe_num_per_buf == 0) || + (v_scq_info->valid_wqe_num == custom_scqe_num)) + return UNF_TRUE; + else + return UNF_FALSE; +} + +static inline struct hifcoe_scqe_type_s *hifc_get_scq_entry( + struct hifc_scq_info_s *v_scq_info) +{ + unsigned int buf_id = 0; + unsigned short buf_offset = 0; + unsigned short ci = 0; + struct cqm_buf_list_s *buf = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_scq_info, return NULL); + + ci = v_scq_info->ci; + buf_id = ci / v_scq_info->wqe_num_per_buf; + buf = &v_scq_info->cqm_scq_info->q_room_buf_1.buf_list[buf_id]; + buf_offset = (unsigned short)(ci % v_scq_info->wqe_num_per_buf); + + return (struct hifcoe_scqe_type_s *)(buf->va) + buf_offset; +} + +static inline int hifc_is_cqe_done(unsigned int *v_done, unsigned int *v_owner, + unsigned short v_driver_owner) +{ + return ((((unsigned short)(!!(*v_done & HIFC_DONE_MASK)) == + v_driver_owner) && ((unsigned short) + (!!(*v_owner & HIFC_OWNER_MASK)) == v_driver_owner)) ? + UNF_TRUE : UNF_FALSE); +} + +unsigned int hifc_process_scq_cqe_entity(unsigned long v_scq_info, + unsigned int proc_cnt) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int index = 0; + struct hifc_wq_header_s *queue_header = NULL; + struct hifcoe_scqe_type_s *scqe = NULL; + struct hifcoe_scqe_type_s tmp_scqe; + + struct hifc_scq_info_s *scq_info = (struct hifc_scq_info_s *)v_scq_info; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, scq_info, return ret); + + queue_header = (struct hifc_wq_header_s *) + (void *)(scq_info->cqm_scq_info->q_header_vaddr); + + for (index = 0; index < proc_cnt;) { + /* If linked wqe, then update CI */ + if (hifc_is_scq_link_wqe(scq_info) == UNF_TRUE) { + hifc_update_consumer_info(scq_info->valid_wqe_num, + &scq_info->ci, + &scq_info->ci_owner); + hifc_update_cq_header(&queue_header->ci_record, + scq_info->ci, + scq_info->ci_owner); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, + UNF_INFO, + "[info]Current wqe is a linked wqe"); + continue; + } + + /* Get SCQE and then check obit & donebit whether been set */ + scqe = hifc_get_scq_entry(scq_info); + if (unlikely(!scqe)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, "[warn]Scqe is NULL"); + break; + } + + if (hifc_is_cqe_done((unsigned int *)(void *)(&scqe->wd0), + (unsigned int *)(void *)(&scqe->ch.wd0), + scq_info->ci_owner) != UNF_TRUE) { + atomic_set(&scq_info->flush_state, + HIFC_QUEUE_FLUSH_DONE); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, + UNF_INFO, + "[info]Now has no valid scqe"); + break; + } + + /* rmb & do memory copy */ + rmb(); + memcpy(&tmp_scqe, scqe, sizeof(struct hifcoe_scqe_type_s)); + + hifc_big_to_cpu32(&tmp_scqe, sizeof(struct hifcoe_scqe_type_s)); + + /* process SCQ entry */ + ret = hifc_rcv_scqe_entry_from_scq(scq_info->phba, + (void *)&tmp_scqe, + scq_info->queue_id); + if (unlikely(ret != RETURN_OK)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_IO_ATT, + UNF_WARN, + "[warn]QueueId(0x%x) scqn(0x%x) scqe process error at CI(0x%x)", + scq_info->queue_id, scq_info->scqn, + scq_info->ci); + } + + /* Update Driver's CI & Obit */ + hifc_update_consumer_info(scq_info->valid_wqe_num, + &scq_info->ci, 
&scq_info->ci_owner); + hifc_update_cq_header(&queue_header->ci_record, scq_info->ci, + scq_info->ci_owner); + index++; + } + /* Re-schedule again if necessary */ + if (proc_cnt == index) + tasklet_schedule(&scq_info->tasklet); + + return index; +} + +void hifc_set_scq_irq_cfg(struct hifc_hba_s *hba, unsigned int mode, + unsigned short msix_index) +{ + unsigned char pending_limt = 0; + unsigned char coalesc_timer_cfg = 0; + + struct nic_interrupt_info info = { 0 }; + + if (mode != HIFC_SCQ_INTR_LOW_LATENCY_MODE) { + pending_limt = 5; + coalesc_timer_cfg = 10; + } + + memset(&info, 0, sizeof(info)); + info.interrupt_coalesc_set = 1; + info.lli_set = 0; + info.pending_limt = pending_limt; + info.coalesc_timer_cfg = coalesc_timer_cfg; + info.resend_timer_cfg = 0; + info.msix_index = msix_index; + hifc_set_interrupt_cfg(hba->hw_dev_handle, info); +} + +void hifc_process_scq_cqe(unsigned long v_scq_info) +{ + struct hifc_scq_info_s *scq_info = (struct hifc_scq_info_s *)v_scq_info; + + HIFC_CHECK(INVALID_VALUE32, scq_info, return); + + hifc_process_scq_cqe_entity(v_scq_info, + HIFC_CQE_MAX_PROCESS_NUM_PER_INTR); +} + +irqreturn_t hifc_scq_irq(int v_irq, void *v_scq_info) +{ + HIFC_CHECK(INVALID_VALUE32, NULL != v_scq_info, return IRQ_NONE); + + tasklet_schedule(&((struct hifc_scq_info_s *)v_scq_info)->tasklet); + + return IRQ_HANDLED; +} + +static unsigned int hifc_alloc_scq_int(struct hifc_scq_info_s *v_scq_info) +{ + int ret = UNF_RETURN_ERROR_S32; + unsigned short act_num = 0; + struct irq_info irq_info; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_scq_info, + return UNF_RETURN_ERROR); + + /* 1. Alloc & check SCQ IRQ */ + hba = (struct hifc_hba_s *)(v_scq_info->phba); + ret = hifc_alloc_irqs(hba->hw_dev_handle, SERVICE_T_FC, + HIFC_INT_NUM_PER_QUEUE, &irq_info, &act_num); + if ((ret != RETURN_OK) || (act_num != HIFC_INT_NUM_PER_QUEUE)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Allocate scq irq failed, return %d", ret); + + return UNF_RETURN_ERROR; + } + + if (irq_info.msix_entry_idx >= HIFC_SCQ_INT_ID_MAX) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]SCQ irq id exceed %d, msix_entry_idx %d", + HIFC_SCQ_INT_ID_MAX, irq_info.msix_entry_idx); + + hifc_free_irq(hba->hw_dev_handle, SERVICE_T_FC, + irq_info.irq_id); + + return UNF_RETURN_ERROR; + } + + v_scq_info->irq_id = (unsigned int)(irq_info.irq_id); + v_scq_info->msix_entry_idx = (unsigned short)(irq_info.msix_entry_idx); + + ret = snprintf(v_scq_info->irq_name, HIFC_IRQ_NAME_MAX - 1, + "fc_scq%u_%x_msix%u", v_scq_info->queue_id, + hba->port_cfg.port_id, v_scq_info->msix_entry_idx); + UNF_FUNCTION_RETURN_CHECK(ret, HIFC_IRQ_NAME_MAX - 1); + /* 2. SCQ IRQ tasklet init */ + tasklet_init(&v_scq_info->tasklet, hifc_process_scq_cqe, + (unsigned long)v_scq_info); + + /* 3. 
Request IRQ for SCQ */ + ret = request_irq(v_scq_info->irq_id, hifc_scq_irq, 0UL, + v_scq_info->irq_name, v_scq_info); + hifc_set_msix_state(hba->hw_dev_handle, v_scq_info->msix_entry_idx, + HIFC_MSIX_ENABLE); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Request SCQ irq failed, SCQ Index = %u, return %d", + v_scq_info->queue_id, ret); + + hifc_free_irq(hba->hw_dev_handle, SERVICE_T_FC, + v_scq_info->irq_id); + memset(v_scq_info->irq_name, 0, HIFC_IRQ_NAME_MAX); + v_scq_info->irq_id = 0; + v_scq_info->msix_entry_idx = 0; + return UNF_RETURN_ERROR; + } + return RETURN_OK; +} + +static void hifc_free_scq_int(struct hifc_scq_info_s *v_scq_info) +{ + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_scq_info, return); + + hba = (struct hifc_hba_s *)(v_scq_info->phba); + hifc_set_msix_state(hba->hw_dev_handle, v_scq_info->msix_entry_idx, + HIFC_MSIX_DISABLE); + free_irq(v_scq_info->irq_id, v_scq_info); + tasklet_kill(&v_scq_info->tasklet); + hifc_free_irq(hba->hw_dev_handle, SERVICE_T_FC, v_scq_info->irq_id); + memset(v_scq_info->irq_name, 0, HIFC_IRQ_NAME_MAX); + v_scq_info->irq_id = 0; + v_scq_info->msix_entry_idx = 0; +} + +static void hifc_init_scq_info(struct hifc_hba_s *v_hba, + struct cqm_queue_s *v_cqm_scq, + unsigned int queue_id, + struct hifc_scq_info_s **v_ppscq_info) +{ + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_cqm_scq, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_ppscq_info, return); + + *v_ppscq_info = &v_hba->scq_info[queue_id]; + (*v_ppscq_info)->queue_id = queue_id; + (*v_ppscq_info)->scqn = v_cqm_scq->index; + (*v_ppscq_info)->phba = (void *)v_hba; + + (*v_ppscq_info)->cqm_scq_info = v_cqm_scq; + (*v_ppscq_info)->wqe_num_per_buf = v_cqm_scq->q_room_buf_1.buf_size / + HIFC_SCQE_SIZE; + (*v_ppscq_info)->wqe_size = HIFC_SCQE_SIZE; + + (*v_ppscq_info)->valid_wqe_num = (HIFC_SCQ_IS_STS(queue_id) ? + HIFC_STS_SCQ_DEPTH : HIFC_CMD_SCQ_DEPTH); + (*v_ppscq_info)->scqc_cq_depth = (HIFC_SCQ_IS_STS(queue_id) ? + HIFC_STS_SCQC_CQ_DEPTH : HIFC_CMD_SCQC_CQ_DEPTH); + (*v_ppscq_info)->scqc_ci_type = (HIFC_SCQ_IS_STS(queue_id) ? 
+ HIFC_STS_SCQ_CI_TYPE : HIFC_CMD_SCQ_CI_TYPE); + + (*v_ppscq_info)->ci = 0; + (*v_ppscq_info)->ci_owner = 1; +} + +static void hifc_init_scq_header(struct hifc_wq_header_s *v_queue_header) +{ + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_queue_header, return); + + memset(v_queue_header, 0, sizeof(struct hifc_wq_header_s)); + + /* Obit default is 1 */ + v_queue_header->db_record.pmsn = 1 << 15; + v_queue_header->db_record.dump_pmsn = + v_queue_header->db_record.pmsn; + v_queue_header->ci_record.cmsn = 1 << 15; + v_queue_header->ci_record.dump_cmsn = + v_queue_header->ci_record.cmsn; + + /* Big endian convert */ + hifc_cpu_to_big64((void *)v_queue_header, + sizeof(struct hifc_wq_header_s)); +} + +static void hifc_cfg_scq_ctx(struct hifc_scq_info_s *v_scq_info, + struct hifcoe_cq_qinfo_s *v_scq_ctx) +{ + struct cqm_queue_s *cqm_scq_info = NULL; + struct hifc_queue_info_bus_s queue_bus; + unsigned long long parity = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_scq_info, return); + + cqm_scq_info = v_scq_info->cqm_scq_info; + + v_scq_ctx->pcie_template_hi = 0; + v_scq_ctx->cur_cqe_gpa = + cqm_scq_info->q_room_buf_1.buf_list->pa >> HIFC_CQE_GPA_SHIFT; + v_scq_ctx->pi = 0; + v_scq_ctx->pi_o = 1; + v_scq_ctx->ci = v_scq_info->ci; + v_scq_ctx->ci_o = v_scq_info->ci_owner; + v_scq_ctx->c_eqn_msi_x = v_scq_info->msix_entry_idx; + v_scq_ctx->ci_type = v_scq_info->scqc_ci_type; + v_scq_ctx->cq_depth = v_scq_info->scqc_cq_depth; + v_scq_ctx->armq = HIFC_ARMQ_IDLE; + v_scq_ctx->cur_cqe_cnt = 0; + v_scq_ctx->cqe_max_cnt = 0; + v_scq_ctx->cqe_dmaattr_idx = 0; + v_scq_ctx->cq_so_ro = 0; + v_scq_ctx->init_mode = HIFC_CQ_INT_MODE; + v_scq_ctx->next_o = 1; + v_scq_ctx->loop_o = 1; + v_scq_ctx->next_cq_wqe_page_gpa = + cqm_scq_info->q_room_buf_1.buf_list[1].pa >> + HIFC_NEXT_CQE_GPA_SHIFT; + v_scq_ctx->pcie_template_lo = 0; + + v_scq_ctx->ci_gpa = (cqm_scq_info->q_header_paddr + + offsetof(struct hifc_wq_header_s, ci_record)) >> + HIFC_CQE_GPA_SHIFT; + + memset(&queue_bus, 0, sizeof(struct hifc_queue_info_bus_s)); + /* bits 20 */ + queue_bus.bus[0] |= + ((unsigned long long)(v_scq_info->scqn & 0xfffff)); + /* bits 3 */ + queue_bus.bus[0] |= + (((unsigned long long)(v_scq_ctx->pcie_template_lo)) << 20); + /* bits 28 */ + queue_bus.bus[0] |= + (((unsigned long long)(v_scq_ctx->ci_gpa & 0xfffffff)) << 23); + /* bits 6 */ + queue_bus.bus[0] |= + (((unsigned long long)(v_scq_ctx->cqe_dmaattr_idx)) << 51); + /* bits 2 */ + queue_bus.bus[0] |= + (((unsigned long long)(v_scq_ctx->cq_so_ro)) << 57); + /* bits 2 */ + queue_bus.bus[0] |= + (((unsigned long long)(v_scq_ctx->init_mode)) << 59); + /* bits 3 */ + queue_bus.bus[0] |= + (((unsigned long long)(v_scq_ctx->c_eqn_msi_x & 0x7)) << 61); + /* bits 7 */ + queue_bus.bus[1] |= + ((unsigned long long)(v_scq_ctx->c_eqn_msi_x >> 3)); + /* bits 1 */ + queue_bus.bus[1] |= + (((unsigned long long)(v_scq_ctx->ci_type)) << 7); + /* bits 3 */ + queue_bus.bus[1] |= + (((unsigned long long)(v_scq_ctx->cq_depth)) << 8); + /* bits 8 */ + queue_bus.bus[1] |= + (((unsigned long long)(v_scq_ctx->cqe_max_cnt)) << 11); + /* bits 3 */ + queue_bus.bus[1] |= + (((unsigned long long)(v_scq_ctx->pcie_template_hi)) << 19); + + parity = hifc_get_parity_value(queue_bus.bus, HIFC_SCQC_BUS_ROW, + HIFC_SCQC_BUS_COL); + v_scq_ctx->parity_0 = parity & 0x1; + v_scq_ctx->parity_1 = (parity >> 0x1) & 0x1; + v_scq_ctx->parity_2 = (parity >> 0x2) & 0x1; + + hifc_cpu_to_big64((void *)v_scq_ctx, sizeof(struct hifcoe_cq_qinfo_s)); +} + +static unsigned int hifc_create_scqc_via_cmdq_sync( + struct 
hifc_hba_s *v_hba, + struct hifcoe_cq_qinfo_s *v_scqc, + unsigned int scqn) +{ +#define HIFC_INIT_SCQC_TIMEOUT 3000 + + int ret; + unsigned int cvt_size; + struct hifcoe_cmdqe_creat_scqc_s init_scqc_cmd; + struct hifc_cmd_buf *cmdq_in_buf; + + cmdq_in_buf = hifc_alloc_cmd_buf(v_hba->hw_dev_handle); + if (!cmdq_in_buf) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]cmdq in_cmd_buf alloc failed"); + + HIFC_ERR_IO_STAT(v_hba, HIFCOE_TASK_T_INIT_SCQC); + return UNF_RETURN_ERROR; + } + + memset(&init_scqc_cmd, 0, sizeof(init_scqc_cmd)); + init_scqc_cmd.wd0.task_type = HIFCOE_TASK_T_INIT_SCQC; + init_scqc_cmd.wd1.scqn = HIFC_LSW(scqn); + cvt_size = sizeof(init_scqc_cmd) - sizeof(init_scqc_cmd.scqc); + hifc_cpu_to_big32(&init_scqc_cmd, cvt_size); + + /* v_scqc is already big endian */ + memcpy(init_scqc_cmd.scqc, v_scqc, sizeof(*v_scqc)); + memcpy(cmdq_in_buf->buf, &init_scqc_cmd, sizeof(init_scqc_cmd)); + cmdq_in_buf->size = sizeof(init_scqc_cmd); + + ret = hifc_cmdq_detail_resp(v_hba->hw_dev_handle, HIFC_ACK_TYPE_CMDQ, + HIFC_MOD_FCOE, 0, + cmdq_in_buf, NULL, HIFC_INIT_SCQC_TIMEOUT); + hifc_free_cmd_buf(v_hba->hw_dev_handle, cmdq_in_buf); + if (ret) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Send creat scqc via cmdq failed, ret=%d", ret); + + HIFC_ERR_IO_STAT(v_hba, HIFCOE_TASK_T_INIT_SCQC); + return UNF_RETURN_ERROR; + } + + HIFC_IO_STAT(v_hba, HIFCOE_TASK_T_INIT_SCQC); + + return RETURN_OK; +} + +static unsigned int hifc_create_scq(struct hifc_hba_s *v_hba) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int scq_index = 0; + unsigned int scq_cfg_num = 0; + struct cqm_queue_s *cqm_scq = NULL; + void *handle = NULL; + struct hifc_scq_info_s *scq_info = NULL; + struct hifcoe_cq_qinfo_s scq_ctx_info; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, + return UNF_RETURN_ERROR); + + handle = v_hba->hw_dev_handle; + + /* Create SCQ by CQM interface */ + for (scq_index = 0; scq_index < HIFC_TOTAL_SCQ_NUM; scq_index++) { + /* + * 1. Create/Allocate SCQ + * + * Notice: SCQ[0, 2, 4 ...]--->CMD SCQ, + * SCQ[1, 3, 5 ...]--->STS SCQ, SCQ[HIFC_TOTAL_SCQ_NUM-1] + * --->Defaul SCQ + */ + cqm_scq = cqm_object_nonrdma_queue_create( + handle, + CQM_OBJECT_NONRDMA_SCQ, + HIFC_SCQ_IS_STS(scq_index) ? + HIFC_STS_SCQ_DEPTH : + HIFC_CMD_SCQ_DEPTH, + HIFC_SCQE_SIZE, + v_hba); + if (!cqm_scq) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, "[err]Create scq failed"); + + goto free_scq; + } + + /* 2. Initialize SCQ (info) */ + hifc_init_scq_info(v_hba, cqm_scq, scq_index, &scq_info); + + /* 3. Allocate & Initialize SCQ interrupt */ + ret = hifc_alloc_scq_int(scq_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, "[err]Allocate scq interrupt failed"); + + cqm_object_delete(&cqm_scq->object); + memset(scq_info, 0, sizeof(struct hifc_scq_info_s)); + goto free_scq; + } + + /* 4. Initialize SCQ queue header */ + hifc_init_scq_header( + (struct hifc_wq_header_s *) + (void *)cqm_scq->q_header_vaddr); + + /* 5. 
Initialize & Create SCQ CTX */ + memset(&scq_ctx_info, 0, sizeof(scq_ctx_info)); + hifc_cfg_scq_ctx(scq_info, &scq_ctx_info); + ret = hifc_create_scqc_via_cmdq_sync(v_hba, + &scq_ctx_info, + scq_info->scqn); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, "[err]Create scq context failed"); + + cqm_object_delete(&cqm_scq->object); + memset(scq_info, 0, sizeof(struct hifc_scq_info_s)); + goto free_scq; + } + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Create SCQ[%u] Scqn=%u WqeNum=%u WqeSize=%u WqePerBuf=%u CqDepth=%u CiType=%u irq=%u msix=%u", + scq_info->queue_id, scq_info->scqn, + scq_info->valid_wqe_num, scq_info->wqe_size, + scq_info->wqe_num_per_buf, scq_info->scqc_cq_depth, + scq_info->scqc_ci_type, scq_info->irq_id, + scq_info->msix_entry_idx); + } + + /* + * Last SCQ is used to handle SCQE delivery access when clearing buffer + */ + v_hba->default_scqn = scq_info->scqn; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Default Scqn=%d CqmScqIndex=%u", + v_hba->default_scqn, cqm_scq->index); + + return RETURN_OK; + +free_scq: + hifc_flush_scq_ctx(v_hba); + + scq_cfg_num = scq_index; + for (scq_index = 0; scq_index < scq_cfg_num; scq_index++) { + scq_info = &v_hba->scq_info[scq_index]; + hifc_free_scq_int(scq_info); + cqm_scq = scq_info->cqm_scq_info; + cqm_object_delete(&cqm_scq->object); + memset(scq_info, 0, sizeof(struct hifc_scq_info_s)); + } + + return UNF_RETURN_ERROR; +} + +static void hifc_destroy_scq(struct hifc_hba_s *v_hba) +{ + unsigned int scq_index = 0; + struct cqm_queue_s *cqm_scq = NULL; + struct hifc_scq_info_s *scq_info = NULL; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Start destroy total %d SCQ", HIFC_TOTAL_SCQ_NUM); + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return); + + /* Use CQM to delete SCQ */ + for (scq_index = 0; scq_index < HIFC_TOTAL_SCQ_NUM; scq_index++) { + scq_info = &v_hba->scq_info[scq_index]; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_ALL, + "[info]Destroy SCQ%u, Scqn=%u, Irq=%u, msix=%u, name=%s", + scq_index, scq_info->scqn, scq_info->irq_id, + scq_info->msix_entry_idx, scq_info->irq_name); + + hifc_free_scq_int(scq_info); + cqm_scq = scq_info->cqm_scq_info; + cqm_object_delete(&cqm_scq->object); + memset(scq_info, 0, sizeof(struct hifc_scq_info_s)); + } +} + +static void hifc_init_srq_info(struct hifc_hba_s *v_hba, + struct cqm_queue_s *v_cqm_srq, + struct hifc_srq_info_s *v_srq_info) +{ + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_cqm_srq, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_srq_info, return); + + v_srq_info->phba = (void *)v_hba; + + v_srq_info->cqm_srq_info = v_cqm_srq; + v_srq_info->wqe_num_per_buf = v_cqm_srq->q_room_buf_1.buf_size / + HIFC_SRQE_SIZE - 1; + v_srq_info->wqe_size = HIFC_SRQE_SIZE; + v_srq_info->valid_wqe_num = v_cqm_srq->valid_wqe_num; + v_srq_info->pi = 0; + v_srq_info->pi_owner = HIFC_SRQ_INIT_LOOP_O; + v_srq_info->pmsn = 0; + v_srq_info->srqn = v_cqm_srq->index; + v_srq_info->first_rqe_rcv_dma = 0; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Init srq info(srq index 0x%x) valid wqe num 0x%x, buffer size 0x%x, wqe num per buf 0x%x", + v_cqm_srq->index, v_srq_info->valid_wqe_num, + v_cqm_srq->q_room_buf_1.buf_size, + v_srq_info->wqe_num_per_buf); +} + +static void hifc_init_srq_header(struct hifc_wq_header_s *v_queue_header) +{ + 
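/* Note (descriptive comment, added for clarity): unlike the SCQ header set up + * in hifc_init_scq_header() above, the SRQ queue header is simply zeroed here; + * its PMSN is only updated later by hifc_post_els_srq_wqe() through + * hifc_update_srq_header(). + */ + 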
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_queue_header, return); + + memset(v_queue_header, 0, sizeof(struct hifc_wq_header_s)); +} + +static struct hifcoe_rqe_s *hifc_get_srq_entry( + struct hifc_srq_info_s *v_srq_info, + struct hifcoe_rqe_s **v_linked_rqe, + unsigned short position) +{ + unsigned int buf_id = 0; + unsigned int wqe_num_per_buf = 0; + unsigned short buf_offset = 0; + struct cqm_buf_list_s *buf = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_srq_info, return NULL); + + wqe_num_per_buf = v_srq_info->wqe_num_per_buf; + + buf_id = position / wqe_num_per_buf; + buf = &v_srq_info->cqm_srq_info->q_room_buf_1.buf_list[buf_id]; + buf_offset = position % ((unsigned short)wqe_num_per_buf); + + if (buf_offset + 1 == wqe_num_per_buf) + *v_linked_rqe = (struct hifcoe_rqe_s *)(buf->va) + + wqe_num_per_buf; + else + *v_linked_rqe = NULL; + + return (struct hifcoe_rqe_s *)(buf->va) + buf_offset; +} + +/** + * hifc_set_srq_wqe_owner_be - Assign a value to Owner Bit of WQE in the + * big-endian format of Wqe Page. + * @v_sqe_ctrl_in_wp: sqe ctrl wqe struct info for communicate with uncode + * @owner: owner value which need to set + */ +static void hifc_set_srq_wqe_owner_be( + struct hifcoe_wqe_ctrl_s *v_sqe_ctrl_in_wp, + unsigned int owner) +{ + struct hifcoe_wqe_ctrl_ch_s wqe_ctrl_ch; + + mb(); + + wqe_ctrl_ch.ctrl_ch_val = be32_to_cpu(v_sqe_ctrl_in_wp->ch.ctrl_ch_val); + wqe_ctrl_ch.wd0.owner = owner; + v_sqe_ctrl_in_wp->ch.ctrl_ch_val = cpu_to_be32(wqe_ctrl_ch.ctrl_ch_val); + + mb(); +} + +static void hifc_set_srq_link_wqe_owner_be(struct hifc_link_wqe_s *v_link_wqe, + unsigned int owner, + unsigned short pmsn) +{ + struct hifc_link_wqe_s local_lw; + + mb(); + local_lw.val_wd1 = be32_to_cpu(v_link_wqe->val_wd1); + local_lw.wd1.msn = pmsn; + local_lw.wd1.dump_msn = (local_lw.wd1.msn & 0x7fff); + v_link_wqe->val_wd1 = cpu_to_be32(local_lw.val_wd1); + + local_lw.val_wd0 = be32_to_cpu(v_link_wqe->val_wd0); + local_lw.wd0.o = owner; + v_link_wqe->val_wd0 = cpu_to_be32(local_lw.val_wd0); + mb(); +} + +void hifc_post_els_srq_wqe(struct hifc_srq_info_s *v_srq_info, + unsigned short buff_id) +{ + struct hifcoe_rqe_s *rqe = NULL; + struct hifcoe_rqe_s tmp_rqe; + struct hifcoe_rqe_s *linked_rqe = NULL; + struct hifc_wq_header_s *wq_header = NULL; + struct hifc_srq_buff_entry_s *buff_entry = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_srq_info, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + buff_id < v_srq_info->valid_wqe_num, return); + + buff_entry = v_srq_info->els_buff_entry_head + buff_id; + + spin_lock(&v_srq_info->srq_spin_lock); + + /* Obtain RQE, not include link wqe */ + rqe = hifc_get_srq_entry(v_srq_info, &linked_rqe, v_srq_info->pi); + if (!rqe) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]post els srq,get srqe failed, valid wqe num 0x%x, pi 0x%x, pmsn 0x%x", + v_srq_info->valid_wqe_num, v_srq_info->pi, + v_srq_info->pmsn); + + spin_unlock(&v_srq_info->srq_spin_lock); + + return; + } + + /* Initialize RQE */ + /* cs section is not used */ + memset(&tmp_rqe, 0, sizeof(struct hifcoe_rqe_s)); + + /* default Obit is invalid, and set valid finally */ + hifc_build_srq_wqe_ctrls(&tmp_rqe, !v_srq_info->pi_owner, + v_srq_info->pmsn + 1); + + tmp_rqe.bds_sl.buf_addr_hi = HIFC_HIGH_32_BITS(buff_entry->buff_dma); + tmp_rqe.bds_sl.buf_addr_lo = HIFC_LOW_32_BITS(buff_entry->buff_dma); + tmp_rqe.drv_sl.wd0.user_id = buff_id; + + /* convert to big endian */ + hifc_cpu_to_big32(&tmp_rqe, sizeof(struct hifcoe_rqe_s)); + + memcpy(rqe, &tmp_rqe, 
sizeof(struct hifcoe_rqe_s)); + + /* reset Obit */ + hifc_set_srq_wqe_owner_be( + (struct hifcoe_wqe_ctrl_s *)(void *)&rqe->ctrl_sl, + v_srq_info->pi_owner); + + if (linked_rqe) { + /* Update Obit in linked WQE */ + hifc_set_srq_link_wqe_owner_be( + (struct hifc_link_wqe_s *)(void *)linked_rqe, + v_srq_info->pi_owner, + v_srq_info->pmsn + 1); + } + + /* Update PI and PMSN */ + hifc_update_producer_info((unsigned short)(v_srq_info->valid_wqe_num), + &v_srq_info->pi, + &v_srq_info->pi_owner); + + /* + * pmsn is 16bit. The value is added to the maximum value and is + * automatically reversed + */ + v_srq_info->pmsn++; + + /* Update pmsn in queue header */ + wq_header = (struct hifc_wq_header_s *) + (void *)v_srq_info->cqm_srq_info->q_header_vaddr; + hifc_update_srq_header(&wq_header->db_record, v_srq_info->pmsn); + + spin_unlock(&v_srq_info->srq_spin_lock); +} + +static void hifc_cfg_srq_ctx(struct hifc_srq_info_s *v_srq_info, + struct hifc_srq_ctx_s *v_srq_ctx, + unsigned int v_sge_size, + unsigned long long v_rqe_gpa) +{ + struct hifc_srq_ctx_s *srq_ctx = NULL; + struct cqm_queue_s *cqm_srq_info = NULL; + struct hifc_queue_info_bus_s queue_bus; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_srq_info, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_srq_ctx, return); + + cqm_srq_info = v_srq_info->cqm_srq_info; + srq_ctx = v_srq_ctx; + + srq_ctx->last_rq_pmsn = 0; + srq_ctx->cur_rqe_msn = 0; + srq_ctx->pcie_template = 0; + /* The value of CTX needs to be updated when RQE is configured */ + srq_ctx->cur_rqe_gpa = v_rqe_gpa; + srq_ctx->cur_sge_v = 0; + srq_ctx->cur_sge_l = 0; + /* The information received by the SRQ is reported through the SCQ. + * The interrupt and ArmCQ are disabled. + */ + srq_ctx->ceqn_msix = 0; + srq_ctx->int_mode = 0; + srq_ctx->cur_sge_remain_len = 0; + srq_ctx->cur_sge_id = 0; + srq_ctx->consant_sge_len = v_sge_size; + srq_ctx->cur_wqe = 0; + srq_ctx->pmsn_type = HIFC_PMSN_CI_TYPE_FROM_HOST; + srq_ctx->bdsl = 0; + srq_ctx->cr = 0; + srq_ctx->csl = 0; + srq_ctx->cf = 0; + srq_ctx->ctrl_sl = 0; + srq_ctx->cur_sge_gpa = 0; + srq_ctx->cur_pmsn_gpa = cqm_srq_info->q_header_paddr; + srq_ctx->pre_fetch_max_msn = 0; + srq_ctx->cqe_max_cnt = 0; + srq_ctx->cur_cqe_cnt = 0; + srq_ctx->arm_q = 0; + srq_ctx->cq_so_ro = 0; + srq_ctx->cqe_dma_attr_idx = 0; + srq_ctx->rq_so_ro = 0; + srq_ctx->rqe_dma_attr_idx = 0; + srq_ctx->loop_o = HIFC_SRQ_INIT_LOOP_O; + srq_ctx->ring = HIFC_QUEUE_RING; + + memset(&queue_bus, 0, sizeof(struct hifc_queue_info_bus_s)); + /* bits 60 */ + queue_bus.bus[0] |= + ((unsigned long long)(cqm_srq_info->q_ctx_paddr >> 4)); + /* bits 4 */ + queue_bus.bus[0] |= + (((unsigned long long)(srq_ctx->rqe_dma_attr_idx & 0xf)) << 60); + /* bits 2 */ + queue_bus.bus[1] |= + ((unsigned long long)(srq_ctx->rqe_dma_attr_idx >> 4)); + /* bits 2 */ + queue_bus.bus[1] |= (((unsigned long long)(srq_ctx->rq_so_ro)) << 2); + /* bits 60 */ + queue_bus.bus[1] |= + (((unsigned long long)(srq_ctx->cur_pmsn_gpa >> 4)) << 4); + /* bits 17 */ + queue_bus.bus[2] |= ((unsigned long long)(srq_ctx->consant_sge_len)); + /* bits 6 */ + queue_bus.bus[2] |= + (((unsigned long long)(srq_ctx->pcie_template)) << 17); + + srq_ctx->parity = hifc_get_parity_value((void *)queue_bus.bus, + HIFC_SRQC_BUS_ROW, + HIFC_SRQC_BUS_COL); + + hifc_cpu_to_big64((void *)srq_ctx, sizeof(struct hifc_srq_ctx_s)); +} + +static unsigned int hifc_create_srqc_via_cmdq_sync( + struct hifc_hba_s *v_hba, + struct hifc_srq_ctx_s *v_srqc, + unsigned long long v_ctx_gpa) +{ +#define HIFC_INIT_SRQC_TIMEOUT 3000 + + 
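/* Note (descriptive comment, added for clarity): same flow as + * hifc_create_scqc_via_cmdq_sync() above - the already big-endian SRQ context + * is carried in a HIFCOE_TASK_T_INIT_SRQC command and sent synchronously via + * hifc_cmdq_detail_resp(), here referencing the context by its GPA instead of + * by queue number. + */ + 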
int ret; + unsigned int cvt_size; + struct hifcoe_cmdqe_creat_srqc_s init_srqc_cmd; + struct hifc_cmd_buf *cmdq_in_buf; + + cmdq_in_buf = hifc_alloc_cmd_buf(v_hba->hw_dev_handle); + if (!cmdq_in_buf) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]cmdq in_cmd_buf alloc failed"); + + HIFC_ERR_IO_STAT(v_hba, HIFCOE_TASK_T_INIT_SRQC); + return UNF_RETURN_ERROR; + } + + memset(&init_srqc_cmd, 0, sizeof(init_srqc_cmd)); + init_srqc_cmd.wd0.task_type = HIFCOE_TASK_T_INIT_SRQC; + init_srqc_cmd.srqc_gpa_h = HIFC_HIGH_32_BITS(v_ctx_gpa); + init_srqc_cmd.srqc_gpa_l = HIFC_LOW_32_BITS(v_ctx_gpa); + cvt_size = sizeof(init_srqc_cmd) - sizeof(init_srqc_cmd.srqc); + hifc_cpu_to_big32(&init_srqc_cmd, cvt_size); + + /* v_srqc is already big-endian */ + memcpy(init_srqc_cmd.srqc, v_srqc, sizeof(*v_srqc)); + memcpy(cmdq_in_buf->buf, &init_srqc_cmd, sizeof(init_srqc_cmd)); + cmdq_in_buf->size = sizeof(init_srqc_cmd); + + ret = hifc_cmdq_detail_resp(v_hba->hw_dev_handle, HIFC_ACK_TYPE_CMDQ, + HIFC_MOD_FCOE, 0, cmdq_in_buf, + NULL, HIFC_INIT_SRQC_TIMEOUT); + + hifc_free_cmd_buf(v_hba->hw_dev_handle, cmdq_in_buf); + + if (ret) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Send creat srqc via cmdq failed, ret=%d", ret); + + HIFC_ERR_IO_STAT(v_hba, HIFCOE_TASK_T_INIT_SRQC); + return UNF_RETURN_ERROR; + } + + HIFC_IO_STAT(v_hba, HIFCOE_TASK_T_INIT_SRQC); + + return RETURN_OK; +} + +static void hifc_init_els_srq_wqe(struct hifc_srq_info_s *v_srq_info) +{ + unsigned int rqe_index = 0; + struct hifc_srq_buff_entry_s *buff_entry = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_srq_info, return); + + for (rqe_index = 0; rqe_index < v_srq_info->valid_wqe_num - 1; + rqe_index++) { + buff_entry = v_srq_info->els_buff_entry_head + rqe_index; + + hifc_post_els_srq_wqe(v_srq_info, buff_entry->buff_id); + } +} + +static void hifc_free_els_srq_buff(struct hifc_hba_s *v_hba, + unsigned int srq_valid_wqe) +{ + unsigned int buff_index = 0; + struct hifc_srq_info_s *srq_info = NULL; + struct hifc_srq_buff_entry_s *buff_entry = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return); + + srq_info = &v_hba->els_srq_info; + + if (!srq_info->els_buff_entry_head) + return; + + for (buff_index = 0; buff_index < srq_valid_wqe; buff_index++) { + buff_entry = &srq_info->els_buff_entry_head[buff_index]; + buff_entry->buff_addr = NULL; + } + + if (srq_info->buff_list.buflist) { + for (buff_index = 0; buff_index < srq_info->buff_list.buf_num; + buff_index++) { + if (srq_info->buff_list.buflist[buff_index].paddr) { + pci_unmap_single( + v_hba->pci_dev, + srq_info->buff_list.buflist[buff_index].paddr, + srq_info->buff_list.buf_size, + DMA_FROM_DEVICE); + srq_info->buff_list.buflist[buff_index].paddr = 0; + } + if (srq_info->buff_list.buflist[buff_index].vaddr) { + kfree(srq_info->buff_list.buflist[buff_index].vaddr); + srq_info->buff_list.buflist[buff_index].vaddr = NULL; + } + } + + kfree(srq_info->buff_list.buflist); + srq_info->buff_list.buflist = NULL; + } + + if (srq_info->els_buff_entry_head) { + kfree(srq_info->els_buff_entry_head); + srq_info->els_buff_entry_head = NULL; + } +} + +static unsigned int hifc_alloc_els_srq_buff(struct hifc_hba_s *v_hba, + unsigned int srq_valid_wqe) +{ + unsigned int req_buff_size = 0; + unsigned int buff_index = 0; + struct hifc_srq_info_s *srq_info = NULL; + struct hifc_srq_buff_entry_s *buff_entry = NULL; + unsigned int buf_total_size; + unsigned int buf_num; + unsigned int alloc_idx; + unsigned int cur_buf_idx = 0; + unsigned int cur_buf_offset 
= 0; + unsigned int buf_cnt_perhugebuf; + + srq_info = &v_hba->els_srq_info; + + /* Apply for entry buffer */ + req_buff_size = (unsigned int)(srq_valid_wqe * + sizeof(struct hifc_srq_buff_entry_s)); + srq_info->els_buff_entry_head = + (struct hifc_srq_buff_entry_s *)kmalloc(req_buff_size, + GFP_KERNEL); + if (!srq_info->els_buff_entry_head) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Allocate ELS Srq receive buffer entrys failed"); + + return UNF_RETURN_ERROR; + } + memset(srq_info->els_buff_entry_head, 0, req_buff_size); + + buf_total_size = HIFC_SRQ_ELS_SGE_LEN * srq_valid_wqe; + + srq_info->buff_list.buf_size = + buf_total_size > BUF_LIST_PAGE_SIZE ? + BUF_LIST_PAGE_SIZE : buf_total_size; + buf_cnt_perhugebuf = + srq_info->buff_list.buf_size / HIFC_SRQ_ELS_SGE_LEN; + buf_num = srq_valid_wqe % buf_cnt_perhugebuf ? srq_valid_wqe / + buf_cnt_perhugebuf + 1 : srq_valid_wqe / + buf_cnt_perhugebuf; + srq_info->buff_list.buflist = (struct buff_list_s *) + kmalloc(buf_num * sizeof(struct buff_list_s), + GFP_KERNEL); + srq_info->buff_list.buf_num = buf_num; + + if (!srq_info->buff_list.buflist) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Allocate ELS buf list failed out of memory"); + goto free_buff; + } + memset(srq_info->buff_list.buflist, 0, + buf_num * sizeof(struct buff_list_s)); + + for (alloc_idx = 0; alloc_idx < buf_num; alloc_idx++) { + srq_info->buff_list.buflist[alloc_idx].vaddr = + kmalloc(srq_info->buff_list.buf_size, GFP_KERNEL); + if (!srq_info->buff_list.buflist[alloc_idx].vaddr) + goto free_buff; + memset(srq_info->buff_list.buflist[alloc_idx].vaddr, 0, + srq_info->buff_list.buf_size); + + srq_info->buff_list.buflist[alloc_idx].paddr = + pci_map_single( + v_hba->pci_dev, + srq_info->buff_list.buflist[alloc_idx].vaddr, + srq_info->buff_list.buf_size, DMA_FROM_DEVICE); + if (pci_dma_mapping_error( + v_hba->pci_dev, + srq_info->buff_list.buflist[alloc_idx].paddr)) { + srq_info->buff_list.buflist[alloc_idx].paddr = 0; + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, "[err]Map els srq buffer failed"); + + goto free_buff; + } + } + + /* Apply for receiving buffer and attach it to the free linked list */ + for (buff_index = 0; buff_index < srq_valid_wqe; buff_index++) { + buff_entry = &srq_info->els_buff_entry_head[buff_index]; + + cur_buf_idx = buff_index / buf_cnt_perhugebuf; + + cur_buf_offset = HIFC_SRQ_ELS_SGE_LEN * + (buff_index % buf_cnt_perhugebuf); + buff_entry->buff_addr = + srq_info->buff_list.buflist[cur_buf_idx].vaddr + + cur_buf_offset; + + buff_entry->buff_dma = + srq_info->buff_list.buflist[cur_buf_idx].paddr + + cur_buf_offset; + + buff_entry->buff_id = (unsigned short)buff_index; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[EVENT]Allocate bufnum:%u,buf_total_size:%u", + buf_num, buf_total_size); + + return RETURN_OK; + +free_buff: + hifc_free_els_srq_buff(v_hba, srq_valid_wqe); + return UNF_RETURN_ERROR; +} + +/** + * hifc_root_cmdq_enqueue - Send commands to the chip via ROOT CMDQ. 
+ * @v_hba: hba handler to send cmd + * @v_cmdqe: cmdqe buff + * @cmd_len: cmdqe buff len + * @Return: 0 - success, negative - failure + */ +unsigned int hifc_root_cmdq_enqueue(void *v_hba, union hifc_cmdqe_u *v_cmdqe, + unsigned short cmd_len) +{ + unsigned char wqe_type = 0; + int cmdq_ret = 0; + struct hifc_cmd_buf *cmdq_buf = NULL; + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_hba; + wqe_type = (unsigned char)v_cmdqe->common.wd0.task_type; + HIFC_IO_STAT(hba, wqe_type); + + cmdq_buf = hifc_alloc_cmd_buf(hba->hw_dev_handle); + if (!cmdq_buf) { + HIFC_ERR_IO_STAT(hba, wqe_type); + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) CqmHandle(0x%p) allocate cmdq buffer failed", + hba->port_cfg.port_id, hba->hw_dev_handle); + + return UNF_RETURN_ERROR; + } + + memcpy(cmdq_buf->buf, v_cmdqe, cmd_len); + hifc_cpu_to_big32(cmdq_buf->buf, cmd_len); + cmdq_buf->size = cmd_len; + + cmdq_ret = hifc_cmdq_async(hba->hw_dev_handle, HIFC_ACK_TYPE_CMDQ, + HIFC_MOD_FCOE, 0, cmdq_buf); + + if (cmdq_ret != RETURN_OK) { + hifc_free_cmd_buf(hba->hw_dev_handle, cmdq_buf); + HIFC_ERR_IO_STAT(hba, wqe_type); + + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) CqmHandle(0x%p) send buff clear cmnd failed(0x%x)", + hba->port_cfg.port_id, hba->hw_dev_handle, cmdq_ret); + + return UNF_RETURN_ERROR; + } + UNF_REFERNCE_VAR(wqe_type); + return RETURN_OK; +} + +static void hifc_send_clear_srq_cmd(struct hifc_hba_s *v_hba, + struct hifc_srq_info_s *v_srq_info) +{ + union hifc_cmdqe_u cmdqe; + struct cqm_queue_s *cqm_fcp_srq = NULL; + unsigned long flag = 0; + + memset(&cmdqe, 0, sizeof(union hifc_cmdqe_u)); + + spin_lock_irqsave(&v_srq_info->srq_spin_lock, flag); + + cqm_fcp_srq = v_srq_info->cqm_srq_info; + if (!cqm_fcp_srq) { + v_srq_info->state = HIFC_CLEAN_DONE; + spin_unlock_irqrestore(&v_srq_info->srq_spin_lock, flag); + return; + } + + cmdqe.clear_srq.wd0.task_type = HIFCOE_TASK_T_CLEAR_SRQ; + cmdqe.clear_srq.wd1.scqn = HIFC_LSW(v_hba->default_scqn); + cmdqe.clear_srq.wd1.srq_type = v_srq_info->srq_type; + cmdqe.clear_srq.srqc_gpa_h = HIFC_HIGH_32_BITS( + cqm_fcp_srq->q_ctx_paddr); + cmdqe.clear_srq.srqc_gpa_l = HIFC_LOW_32_BITS(cqm_fcp_srq->q_ctx_paddr); + + (void)queue_delayed_work(v_hba->work_queue, + &v_srq_info->del_work, + (unsigned long)msecs_to_jiffies(( + unsigned int)HIFC_SRQ_DEL_STAGE_TIMEOUT_MS)); + + spin_unlock_irqrestore(&v_srq_info->srq_spin_lock, flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port 0x%x begin to clear srq 0x%x(0x%x,0x%llx)", + v_hba->port_cfg.port_id, v_srq_info->srq_type, + HIFC_LSW(v_hba->default_scqn), + (unsigned long long)cqm_fcp_srq->q_ctx_paddr); + + /* Run the ROOT CMDQ command to issue the clear srq command. + * If the command fails to be delivered, retry upon timeout. 
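+ * The retry is driven by the del_work delayed work armed above, which runs + * hifc_srq_clr_time_out() and resends the command at most once more.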
+ */ + (void)hifc_root_cmdq_enqueue(v_hba, &cmdqe, sizeof(cmdqe.clear_srq)); +} + +static void hifc_srq_clr_time_out(struct work_struct *work) +{ + struct hifc_srq_info_s *srq = NULL; + struct hifc_hba_s *hba = NULL; + struct cqm_queue_s *cqm_fcp_imm_srq = NULL; + unsigned long flag = 0; + + srq = container_of(work, struct hifc_srq_info_s, del_work.work); + + spin_lock_irqsave(&srq->srq_spin_lock, flag); + hba = srq->phba; + cqm_fcp_imm_srq = srq->cqm_srq_info; + spin_unlock_irqrestore(&srq->srq_spin_lock, flag); + + if (hba && cqm_fcp_imm_srq) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Port 0x%x clear srq 0x%x stat 0x%x timeout", + hba->port_cfg.port_id, srq->srq_type, srq->state); + + /* + * If the delivery fails or the execution times out after the + * delivery, try again once + */ + srq->del_retry_time++; + + if (srq->del_retry_time < 2) + hifc_send_clear_srq_cmd(hba, srq); + else + srq->del_retry_time = 0; + } +} + +static unsigned int hifc_create_els_srq(struct hifc_hba_s *v_hba) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct cqm_queue_s *cqm_srq = NULL; + struct hifc_wq_header_s *wq_header = NULL; + struct hifc_srq_info_s *srq_info = NULL; + struct hifc_srq_ctx_s srq_ctx = { 0 }; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, + return UNF_RETURN_ERROR); + + cqm_srq = cqm_object_fc_srq_create(v_hba->hw_dev_handle, + CQM_OBJECT_NONRDMA_SRQ, + HIFC_SRQ_ELS_DATA_DEPTH, + HIFC_SRQE_SIZE, + v_hba); + if (!cqm_srq) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Create Els Srq failed"); + + return UNF_RETURN_ERROR; + } + + /* Initialize SRQ */ + srq_info = &v_hba->els_srq_info; + hifc_init_srq_info(v_hba, cqm_srq, srq_info); + srq_info->srq_type = HIFC_SRQ_ELS; + srq_info->enable = UNF_TRUE; + srq_info->state = HIFC_CLEAN_DONE; + srq_info->del_retry_time = 0; + + /* The srq lock is initialized and can be created repeatedly */ + spin_lock_init(&srq_info->srq_spin_lock); + srq_info->spin_lock_init = UNF_TRUE; + + /* Initialize queue header */ + wq_header = (struct hifc_wq_header_s *)(void *)cqm_srq->q_header_vaddr; + hifc_init_srq_header(wq_header); + + INIT_DELAYED_WORK(&srq_info->del_work, hifc_srq_clr_time_out); + + /* Apply for RQ buffer */ + ret = hifc_alloc_els_srq_buff(v_hba, srq_info->valid_wqe_num); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Allocate Els Srq buffer failed"); + + cqm_object_delete(&cqm_srq->object); + memset(srq_info, 0, sizeof(struct hifc_srq_info_s)); + return UNF_RETURN_ERROR; + } + + /* Fill RQE, update queue header */ + hifc_init_els_srq_wqe(srq_info); + + /* Fill SRQ CTX */ + memset(&srq_ctx, 0, sizeof(srq_ctx)); + hifc_cfg_srq_ctx(srq_info, &srq_ctx, HIFC_SRQ_ELS_SGE_LEN, + srq_info->cqm_srq_info->q_room_buf_1.buf_list->pa); + + ret = hifc_create_srqc_via_cmdq_sync( + v_hba, &srq_ctx, + srq_info->cqm_srq_info->q_ctx_paddr); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Creat Els Srqc failed"); + + hifc_free_els_srq_buff(v_hba, srq_info->valid_wqe_num); + cqm_object_delete(&cqm_srq->object); + memset(srq_info, 0, sizeof(struct hifc_srq_info_s)); + + return UNF_RETURN_ERROR; + } + + return RETURN_OK; +} + +void hifc_destroy_srq(void *v_hba) +{ + /* + * Receive clear els srq sts + * ---then--->>> destroy els srq + */ + struct hifc_hba_s *hba = NULL; + struct hifc_srq_info_s *srq_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_hba, return); + + hba = (struct hifc_hba_s 
*)v_hba; + srq_info = &hba->els_srq_info; + + /* release receive buffer */ + hifc_free_els_srq_buff(hba, srq_info->valid_wqe_num); + + /* release srq info */ + if (srq_info->cqm_srq_info) { + cqm_object_delete(&srq_info->cqm_srq_info->object); + srq_info->cqm_srq_info = NULL; + } + if (srq_info->spin_lock_init) + srq_info->spin_lock_init = UNF_FALSE; + srq_info->phba = NULL; + srq_info->enable = UNF_FALSE; + srq_info->state = HIFC_CLEAN_DONE; +} + +/** + * hifc_create_srq - Create SRQ, which contains four SRQ for receiving + * instant data and a SRQ for receiving ELS data. + * @v_hba: hba handler + * @Return: 0 - success, negative - failure + */ +static unsigned int hifc_create_srq(struct hifc_hba_s *v_hba) +{ + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, + return UNF_RETURN_ERROR); + + /* Create ELS SRQ */ + ret = hifc_create_els_srq(v_hba); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Create Els Srq failed"); + return UNF_RETURN_ERROR; + } + + return RETURN_OK; +} + +unsigned int hifc_create_common_share_queues(void *v_hba) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, + return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)v_hba; + + /* Create & Init 8 pairs SCQ */ + ret = hifc_create_scq(hba); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, "[err]Create scq failed"); + + return UNF_RETURN_ERROR; + } + + /* Alloc SRQ resource for SIRT & ELS */ + ret = hifc_create_srq(hba); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, "[err]Create srq failed"); + + hifc_flush_scq_ctx(hba); + hifc_destroy_scq(hba); + + return UNF_RETURN_ERROR; + } + + return RETURN_OK; +} + +void hifc_destroy_common_share_queues(void *v_hba) +{ + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_hba, return); + + hifc_destroy_scq((struct hifc_hba_s *)v_hba); + hifc_destroy_srq((struct hifc_hba_s *)v_hba); +} + +static unsigned char hifc_map_fcp_data_cos(struct hifc_hba_s *v_hba) +{ + unsigned char i = 0; + unsigned char min_cnt_index = HIFC_PACKET_COS_FC_DATA; + int get_init_index = UNF_FALSE; + + for (i = 0; i < HIFC_MAX_COS_NUM; i++) { + /* + * Check whether the CoS is valid for the FC and cannot be + * occupied by the CMD + */ + if ((!(v_hba->cos_bit_map & (1 << i))) || + (i == HIFC_PACKET_COS_FC_CMD)) { + continue; + } + + if (get_init_index == UNF_FALSE) { + min_cnt_index = i; + get_init_index = UNF_TRUE; + continue; + } + + if (atomic_read(&v_hba->cos_rport_cnt[i]) < + atomic_read(&v_hba->cos_rport_cnt[min_cnt_index])) { + min_cnt_index = i; + } + } + + atomic_inc(&v_hba->cos_rport_cnt[min_cnt_index]); + + return min_cnt_index; +} + +static void hifc_update_cos_rport_cnt(struct hifc_hba_s *v_hba, + unsigned char v_cos_index) +{ + if ((v_cos_index >= HIFC_MAX_COS_NUM) || + (v_cos_index == HIFC_PACKET_COS_FC_CMD) || + (!(v_hba->cos_bit_map & (1 << v_cos_index))) || + (atomic_read(&v_hba->cos_rport_cnt[v_cos_index]) == 0)) { + return; + } + + atomic_dec(&v_hba->cos_rport_cnt[v_cos_index]); +} + +void hifc_invalid_parent_sq(struct hifc_parent_sq_info_s *sq_info) +{ + sq_info->rport_index = INVALID_VALUE32; + sq_info->context_id = INVALID_VALUE32; + sq_info->sq_queue_id = INVALID_VALUE32; + sq_info->cache_id = INVALID_VALUE32; + sq_info->max_sqe_num = INVALID_VALUE32; + sq_info->wqe_num_per_buf = INVALID_VALUE32; + sq_info->wqe_size = HIFC_SCQE_SIZE; 
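+ /* These invalid markers are replaced with live values again in + * hifc_alloc_parent_sq() when the parent SQ is (re)allocated. + */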
+ sq_info->wqe_offset = INVALID_VALUE32; + sq_info->head_start_cmsn = HIFC_MAX_MSN; + sq_info->head_end_cmsn = HIFC_MAX_MSN; + sq_info->last_pmsn = INVALID_VALUE16; + sq_info->last_pi_owner = INVALID_VALUE16; + sq_info->local_port_id = INVALID_VALUE32; + sq_info->remote_port_id = INVALID_VALUE32; + sq_info->phba = NULL; + sq_info->del_start_jiff = INVALID_VALUE64; + sq_info->port_in_flush = UNF_FALSE; + sq_info->sq_in_sess_rst = UNF_FALSE; + sq_info->oqid_rd = INVALID_VALUE16; + sq_info->oqid_wr = INVALID_VALUE16; + sq_info->srq_ctx_addr = 0; + atomic_set(&sq_info->sq_cashed, UNF_FALSE); + sq_info->vport_id = 0; + sq_info->sirt_dif_control.protect_opcode = UNF_DIF_ACTION_NONE; + atomic_set(&sq_info->sq_valid, UNF_FALSE); + atomic_set(&sq_info->fush_done_wait_cnt, 0); + + memset(&sq_info->delay_sqe, 0, + sizeof(struct hifc_delay_sqe_ctrl_info_s)); + memset(sq_info->io_stat, 0, sizeof(sq_info->io_stat)); +} + +static void hifc_free_link_list_wpg(struct hifc_parent_sq_info_s *v_sq) +{ + unsigned long flag = 0; + struct hifc_hba_s *hba = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + struct list_head *entry_head_wqe_page = NULL; + struct hifc_sq_wqe_page_s *sq_wpg = NULL; + + hba = (struct hifc_hba_s *)v_sq->phba; + + list_for_each_safe(node, next_node, &v_sq->list_linked_list_sq) { + sq_wpg = list_entry(node, struct hifc_sq_wqe_page_s, entry_wpg); + memset((void *)sq_wpg->wpg_addr, WQE_MARKER_0, + hba->sq_wpg_pool.wpg_size); + + spin_lock_irqsave(&hba->sq_wpg_pool.wpg_pool_lock, flag); + + entry_head_wqe_page = &sq_wpg->entry_wpg; + list_del(entry_head_wqe_page); + list_add_tail(entry_head_wqe_page, + &hba->sq_wpg_pool.list_free_wpg_pool); + + /* WqePage Pool counter */ + atomic_dec(&v_sq->wqe_page_cnt); + atomic_dec(&hba->sq_wpg_pool.wpg_in_use); + + spin_unlock_irqrestore(&hba->sq_wpg_pool.wpg_pool_lock, flag); + } + + HIFC_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_INFO, + "[info]Port(0x%x) RPort(0x%x) Sq(0x%x) link list destroyed, Sq.WqePageCnt=0x%x, SqWpgPool.wpg_in_use=0x%x", + hba->port_cfg.port_id, v_sq->rport_index, v_sq->context_id, + atomic_read(&v_sq->wqe_page_cnt), + atomic_read(&hba->sq_wpg_pool.wpg_in_use)); +} + +static void hifc_free_parent_sq(struct hifc_hba_s *v_hba, + struct hifc_parent_queue_info_s *v_parentq_info) +{ + unsigned int ctx_flush_done = 0; + unsigned int *ctx_dw = NULL; + struct hifc_parent_sq_info_s *sq_info = NULL; + unsigned int delay_cnt = 0; + + sq_info = &v_parentq_info->parent_sq_info; + + /* Free data cos */ + hifc_update_cos_rport_cnt(v_hba, v_parentq_info->queue_data_cos); + + hifc_free_link_list_wpg(sq_info); + + if (sq_info->queue_header_original) { + pci_unmap_single(v_hba->pci_dev, + sq_info->queue_hdr_phy_addr_original, + sizeof(struct hifc_queue_header_s) + + HIFC_SQ_HEADER_ADDR_ALIGN_SIZE, + DMA_BIDIRECTIONAL); + kfree(sq_info->queue_header_original); + sq_info->queue_header_original = NULL; + } + + if (v_parentq_info->parent_ctx.cqm_parent_ctx_obj) { + ctx_dw = (unsigned int *)((void *)( + v_parentq_info->parent_ctx.cqm_parent_ctx_obj->vaddr)); + ctx_flush_done = ctx_dw[HIFC_CTXT_FLUSH_DONE_DW_POS] & + HIFC_CTXT_FLUSH_DONE_MASK_BE; + mb(); + if ((v_parentq_info->offload_state == + HIFC_QUEUE_STATE_DESTROYING) && (ctx_flush_done == 0)) { + do { + ctx_flush_done = + ctx_dw[HIFC_CTXT_FLUSH_DONE_DW_POS] & + HIFC_CTXT_FLUSH_DONE_MASK_BE; + mb(); + if (ctx_flush_done != 0) + break; + delay_cnt++; + } while (delay_cnt < 100); + + if (ctx_flush_done == 0) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_REG_ATT, 
UNF_WARN, + "[warn]Port(0x%x) Rport(0x%x) flush done is not set", + v_hba->port_cfg.port_id, + sq_info->rport_index); + } + } + + cqm_object_delete( + &v_parentq_info->parent_ctx.cqm_parent_ctx_obj->object); + v_parentq_info->parent_ctx.cqm_parent_ctx_obj = NULL; + } + + hifc_invalid_parent_sq(sq_info); +} + +static inline struct hifcoe_sqe_s *hifc_get_wqe_page_entry( + struct hifc_sq_wqe_page_s *v_wpg, + unsigned int wqe_offset) +{ + struct hifcoe_sqe_s *wpg = NULL; + + wpg = (struct hifcoe_sqe_s *)(v_wpg->wpg_addr); + wpg += wqe_offset; + + return wpg; +} + +static struct hifc_sq_wqe_page_s *hifc_add_tail_wqe_page( + struct hifc_parent_sq_info_s *v_sq) +{ + struct hifc_hba_s *hba = NULL; + struct hifc_sq_wqe_page_s *esgl = NULL; + struct list_head *free_list_head = NULL; + unsigned long flag = 0; + + hba = (struct hifc_hba_s *)v_sq->phba; + + spin_lock_irqsave(&hba->sq_wpg_pool.wpg_pool_lock, flag); + + /* Get a WqePage from hba->sq_wpg_pool.list_free_wpg_pool, and add + * to v_sq.list_SqTailWqePage + */ + if (!list_empty(&hba->sq_wpg_pool.list_free_wpg_pool)) { + free_list_head = (&hba->sq_wpg_pool.list_free_wpg_pool)->next; + list_del(free_list_head); + list_add_tail(free_list_head, &v_sq->list_linked_list_sq); + esgl = list_entry(free_list_head, struct hifc_sq_wqe_page_s, + entry_wpg); + + /* WqePage Pool counter */ + atomic_inc(&hba->sq_wpg_pool.wpg_in_use); + } else { + esgl = NULL; + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]SQ pool is empty when SQ(0x%x) try to get wqe page", + v_sq->rport_index); + HIFC_HBA_STAT(hba, HIFC_STAT_SQ_POOL_EMPTY); + } + + spin_unlock_irqrestore(&hba->sq_wpg_pool.wpg_pool_lock, flag); + + return esgl; +} + +static struct hifc_sq_wqe_page_s *hifc_add_one_wqe_page( + struct hifc_parent_sq_info_s *v_sq) +{ + unsigned int wqe_idx = 0; + struct hifc_sq_wqe_page_s *wqe_page = NULL; + struct hifcoe_sqe_s *sqe_in_wp = NULL; + struct hifc_link_wqe_s *link_wqe_in_wpg = NULL; + struct hifc_link_wqe_s link_wqe; + + /* Add a new Wqe Page */ + wqe_page = hifc_add_tail_wqe_page(v_sq); + + if (!wqe_page) + return NULL; + + for (wqe_idx = 0; wqe_idx <= v_sq->wqe_num_per_buf; wqe_idx++) { + sqe_in_wp = hifc_get_wqe_page_entry(wqe_page, wqe_idx); + sqe_in_wp->ctrl_sl.ch.ctrl_ch_val = 0; + } + + /* Set last WqePage as linkwqe */ + link_wqe_in_wpg = (struct hifc_link_wqe_s *) + hifc_get_wqe_page_entry(wqe_page, v_sq->wqe_num_per_buf); + link_wqe.val_wd0 = 0; + link_wqe.val_wd1 = 0; + link_wqe.next_page_addr_hi = 0; + link_wqe.next_page_addr_lo = 0; + link_wqe.wd0.wf = CQM_WQE_WF_LINK; + link_wqe.wd0.ctrlsl = CQM_LINK_WQE_CTRLSL_VALUE; + link_wqe.wd0.o = !(v_sq->last_pi_owner); + link_wqe.wd1.lp = CQM_LINK_WQE_LP_INVALID; + hifc_cpu_to_big32(&link_wqe, sizeof(struct hifc_link_wqe_s)); + memcpy(link_wqe_in_wpg, &link_wqe, sizeof(struct hifc_link_wqe_s)); + + return wqe_page; +} + +static void hifc_alloc_sq_oqid(struct hifc_hba_s *v_hba, + struct hifc_parent_sq_info_s *v_sq) +{ + unsigned short read_oqid = INVALID_VALUE16; + unsigned short write_oqid = INVALID_VALUE16; + unsigned short vf_id = INVALID_VALUE16; + unsigned short mask_value = hifc_host_oq_id_mask(v_hba->hw_dev_handle); + unsigned int cqm_xid = v_sq->context_id; + + vf_id = hifc_global_func_id(v_hba->hw_dev_handle); + + HIFC_OQID_RD((unsigned short)cqm_xid, vf_id, mask_value, read_oqid); + HIFC_OQID_WR((unsigned short)cqm_xid, vf_id, mask_value, write_oqid); + + v_sq->oqid_rd = read_oqid; + v_sq->oqid_wr = write_oqid; +} + +static void hifc_parent_sq_operate_time_out(struct work_struct 
*work) +{ + int free_sq = UNF_FALSE; + unsigned long flag = 0; + struct hifc_parent_sq_info_s *parent_sq = NULL; + struct hifc_parent_queue_info_s *parent_queue = NULL; + struct hifc_hba_s *hba = NULL; + + HIFC_CHECK(INVALID_VALUE32, work, return); + + parent_sq = container_of(work, struct hifc_parent_sq_info_s, + del_work.work); + parent_queue = container_of(parent_sq, struct hifc_parent_queue_info_s, + parent_sq_info); + hba = (struct hifc_hba_s *)parent_sq->phba; + HIFC_CHECK(INVALID_VALUE32, hba, return); + + spin_lock_irqsave(&parent_queue->parent_queue_state_lock, flag); + if (parent_queue->offload_state == HIFC_QUEUE_STATE_DESTROYING) { + free_sq = UNF_TRUE; + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "Port(0x%x) sq rport index(0x%x) local nportid(0x%x),remote nportid(0x%x) reset timeout.", + hba->port_cfg.port_id, + parent_sq->rport_index, + parent_sq->local_port_id, + parent_sq->remote_port_id); + } + spin_unlock_irqrestore(&parent_queue->parent_queue_state_lock, flag); + + /* In the server scenario, if the connection deletion times out, you + * can only wait or perform the FLR operation on the port. If the FLR + * command is run, the fault diffusion mode will be used. + */ + if ((parent_queue->parent_sq_info.del_start_jiff > hba->reset_time) && + (parent_queue->parent_sq_info.del_start_jiff != INVALID_VALUE64) && + (hba->removing == UNF_FALSE)) { + /* There is nothing to do if session reset timeout */ + ; + } + + if (free_sq == UNF_TRUE) { + /* There is nothing to do if session reset timeout */ + ; + } +} + +static void hifc_parent_sq_wait_flush_done_time_out(struct work_struct *work) +{ + unsigned long flag = 0; + struct hifc_parent_sq_info_s *parent_sq = NULL; + struct hifc_parent_queue_info_s *parent_queue = NULL; + struct hifc_hba_s *hba = NULL; + unsigned int ctx_flush_done; + unsigned int *ctx_dw = NULL; + int ret; + + HIFC_CHECK(INVALID_VALUE32, work, return); + + parent_sq = container_of(work, struct hifc_parent_sq_info_s, + flush_done_tmo_work.work); + + HIFC_CHECK(INVALID_VALUE32, parent_sq, return); + + parent_queue = container_of(parent_sq, struct hifc_parent_queue_info_s, + parent_sq_info); + hba = (struct hifc_hba_s *)parent_sq->phba; + HIFC_CHECK(INVALID_VALUE32, hba, return); + HIFC_CHECK(INVALID_VALUE32, parent_queue, return); + + spin_lock_irqsave(&parent_queue->parent_queue_state_lock, flag); + + if (parent_queue->offload_state != HIFC_QUEUE_STATE_DESTROYING) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) sq rport index(0x%x) is not destroying status,offloadsts is %d", + hba->port_cfg.port_id, + parent_sq->rport_index, + parent_queue->offload_state); + spin_unlock_irqrestore(&parent_queue->parent_queue_state_lock, + flag); + return; + } + + if (parent_queue->parent_ctx.cqm_parent_ctx_obj) { + ctx_dw = (unsigned int *)((void *) + (parent_queue->parent_ctx.cqm_parent_ctx_obj->vaddr)); + ctx_flush_done = + ctx_dw[HIFC_CTXT_FLUSH_DONE_DW_POS] & + HIFC_CTXT_FLUSH_DONE_MASK_BE; + if (ctx_flush_done == 0) { + spin_unlock_irqrestore( + &parent_queue->parent_queue_state_lock, flag); + + if (atomic_read(&parent_queue->parent_sq_info.fush_done_wait_cnt) < HIFC_SQ_WAIT_FLUSH_DONE_TIMEOUT_CNT) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[info]Port(0x%x) sq rport index(0x%x) wait flush done timeout %d times", + hba->port_cfg.port_id, + parent_sq->rport_index, + atomic_read(&parent_queue->parent_sq_info.fush_done_wait_cnt)); + + 
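/* Flush-done bit not set yet: bump the wait counter and re-arm the delayed + * work, polling up to HIFC_SQ_WAIT_FLUSH_DONE_TIMEOUT_CNT times before + * giving up and leaving the SQ allocated. + */ + 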
atomic_inc(&parent_queue->parent_sq_info.fush_done_wait_cnt); + + /* Delay Free Sq info */ + ret = queue_delayed_work(hba->work_queue, + &parent_queue->parent_sq_info.flush_done_tmo_work, + (unsigned long)msecs_to_jiffies((unsigned int)HIFC_SQ_WAIT_FLUSH_DONE_TIMEOUT_MS)); + if (ret == (int)false) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) rport(0x%x) queue delayed work failed iret:%d", + hba->port_cfg.port_id, + parent_sq->rport_index, + ret); + HIFC_HBA_STAT(hba, HIFC_STAT_PARENT_SQ_QUEUE_DELAYED_WORK); + } + + return; + } else { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) sq rport index(0x%x) has wait flush done %d times,do not free sq", + hba->port_cfg.port_id, + parent_sq->rport_index, + atomic_read(&parent_queue->parent_sq_info.fush_done_wait_cnt)); + + HIFC_HBA_STAT(hba, HIFC_STAT_CTXT_FLUSH_DONE); + + return; + } + } + } + + spin_unlock_irqrestore(&parent_queue->parent_queue_state_lock, flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) sq rport index(0x%x) flush done bit is ok,free sq now", + hba->port_cfg.port_id, + parent_sq->rport_index); + + hifc_free_parent_queue_info(hba, parent_queue); +} + +unsigned int hifc_alloc_parent_sq( + struct hifc_hba_s *v_hba, + struct hifc_parent_queue_info_s *v_parentq_info, + struct unf_rport_info_s *v_rport_info) +{ + struct hifc_parent_sq_info_s *sq_ctrl = NULL; + struct hifc_sq_wqe_page_s *head_wpg = NULL; + struct cqm_qpc_mpt_s *prnt_ctx = NULL; + unsigned int queue_header_alloc_size = 0; + unsigned long flag = 0; + + /* Craete parent context via CQM */ + prnt_ctx = cqm_object_qpc_mpt_create(v_hba->hw_dev_handle, + CQM_OBJECT_SERVICE_CTX, + HIFC_CNTX_SIZE_256B, + v_parentq_info, + CQM_INDEX_INVALID); + if (!prnt_ctx) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Create parent context failed, CQM_INDEX is 0x%x", + CQM_INDEX_INVALID); + goto parent_create_fail; + } + v_parentq_info->parent_ctx.cqm_parent_ctx_obj = prnt_ctx; + + /* Initialize struct hifc_parent_sq_info_s */ + sq_ctrl = &v_parentq_info->parent_sq_info; + sq_ctrl->phba = (void *)v_hba; + sq_ctrl->rport_index = v_rport_info->rport_index; + sq_ctrl->context_id = prnt_ctx->xid; + sq_ctrl->sq_queue_id = HIFC_QID_SQ; + sq_ctrl->cache_id = INVALID_VALUE32; + sq_ctrl->max_sqe_num = v_hba->exit_count; + /* Reduce one Link Wqe */ + sq_ctrl->wqe_num_per_buf = v_hba->sq_wpg_pool.wqe_per_wpg - 1; + sq_ctrl->wqe_size = HIFC_SQE_SIZE; + sq_ctrl->wqe_offset = 0; + sq_ctrl->head_start_cmsn = 0; + sq_ctrl->head_end_cmsn = HIFC_GET_WP_END_CMSN(0, + sq_ctrl->wqe_num_per_buf); + sq_ctrl->last_pmsn = 0; + /* Linked List SQ Owner Bit 1 valid, 0 invalid */ + sq_ctrl->last_pi_owner = 1; + sq_ctrl->local_port_id = INVALID_VALUE32; + sq_ctrl->remote_port_id = INVALID_VALUE32; + sq_ctrl->sq_in_sess_rst = UNF_FALSE; + atomic_set(&sq_ctrl->sq_valid, UNF_TRUE); + sq_ctrl->del_start_jiff = INVALID_VALUE64; + sq_ctrl->service_type = HIFC_GET_SERVICE_TYPE(v_hba); + sq_ctrl->vport_id = 0; + sq_ctrl->sirt_dif_control.protect_opcode = UNF_DIF_ACTION_NONE; + hifc_alloc_sq_oqid(v_hba, sq_ctrl); + atomic_set(&sq_ctrl->fush_done_wait_cnt, 0); + + /* Check whether the HBA is in the Linkdown state. Note that + * offload_state must be in the non-FREE state. 
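+ * The current in_flushing flag is sampled under flush_state_lock and cached + * in port_in_flush just below.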
+ */ + spin_lock_irqsave(&v_hba->flush_state_lock, flag); + sq_ctrl->port_in_flush = v_hba->in_flushing; + spin_unlock_irqrestore(&v_hba->flush_state_lock, flag); + + INIT_LIST_HEAD(&sq_ctrl->list_linked_list_sq); + atomic_set(&sq_ctrl->wqe_page_cnt, 0); + atomic_set(&sq_ctrl->sq_dbl_cnt, 0); + atomic_set(&sq_ctrl->sqe_minus_cqe_cnt, 1); + atomic_set(&sq_ctrl->sq_wqe_cnt, 0); + atomic_set(&sq_ctrl->sq_cqe_cnt, 0); + memset(sq_ctrl->io_stat, 0, sizeof(sq_ctrl->io_stat)); + + INIT_DELAYED_WORK(&sq_ctrl->del_work, hifc_parent_sq_operate_time_out); + INIT_DELAYED_WORK(&sq_ctrl->flush_done_tmo_work, + hifc_parent_sq_wait_flush_done_time_out); + + memset(&sq_ctrl->delay_sqe, 0, + sizeof(struct hifc_delay_sqe_ctrl_info_s)); + + /* Allocate and initialize the Queue Header space. 64B alignment is + * required. Additional 64B is applied for alignment + */ + queue_header_alloc_size = sizeof(struct hifc_queue_header_s) + + HIFC_SQ_HEADER_ADDR_ALIGN_SIZE; + sq_ctrl->queue_header_original = kmalloc(queue_header_alloc_size, + GFP_ATOMIC); + if (!sq_ctrl->queue_header_original) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]RPort(0x%x) create SQ queue header failed", + v_rport_info->rport_index); + goto qheader_create_fail; + } + + memset((unsigned char *)sq_ctrl->queue_header_original, 0, + queue_header_alloc_size); + + sq_ctrl->queue_hdr_phy_addr_original = pci_map_single( + v_hba->pci_dev, + sq_ctrl->queue_header_original, + queue_header_alloc_size, + DMA_BIDIRECTIONAL); + + if (pci_dma_mapping_error(v_hba->pci_dev, + sq_ctrl->queue_hdr_phy_addr_original)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]RPort(0x%x) SQ queue header DMA mapping failed", + v_rport_info->rport_index); + goto qheader_dma_map_fail; + } + + /* Obtains the 64B alignment address */ + sq_ctrl->queue_header = (struct hifc_queue_header_s *) + HIFC_ADDR_64_ALIGN( + (unsigned long long) + (sq_ctrl->queue_header_original)); + sq_ctrl->queue_hdr_phy_addr = + HIFC_ADDR_64_ALIGN(sq_ctrl->queue_hdr_phy_addr_original); + + /* Each SQ is allocated with a Wqe Page by default. 
The WqePageCnt is + * incremented by one + */ + head_wpg = hifc_add_one_wqe_page(sq_ctrl); + if (!head_wpg) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]RPort(0x%x) create SQ first wqe page failed", + v_rport_info->rport_index); + goto headwpg_create_fail; + } + + atomic_inc(&sq_ctrl->wqe_page_cnt); + + return RETURN_OK; + +headwpg_create_fail: + pci_unmap_single(v_hba->pci_dev, sq_ctrl->queue_hdr_phy_addr_original, + queue_header_alloc_size, DMA_BIDIRECTIONAL); + +qheader_dma_map_fail: + kfree(sq_ctrl->queue_header_original); + sq_ctrl->queue_header_original = NULL; + +qheader_create_fail: + cqm_object_delete(&prnt_ctx->object); + +parent_create_fail: + v_parentq_info->parent_ctx.cqm_parent_ctx_obj = NULL; + + return UNF_RETURN_ERROR; +} + +static void hifc_init_prnt_ctx_sq_qinfo( + struct hifc_parent_queue_info_s *v_parent_qinfo) +{ + struct hifc_parent_sq_info_s *sq = NULL; + struct hifc_sq_wqe_page_s *head_wqe_page = NULL; + struct hifcoe_parent_context_s *ctx = NULL; + struct hifcoe_sq_qinfo_s *parent_sq_ctx = NULL; + struct hifc_queue_info_bus_s queue_bus; + + /* Obtains the Parent Context address */ + sq = &v_parent_qinfo->parent_sq_info; + ctx = (struct hifcoe_parent_context_s *)(void *) + (v_parent_qinfo->parent_ctx.virt_parent_ctx); + head_wqe_page = HIFC_GET_SQ_HEAD(sq); + + parent_sq_ctx = &ctx->sq_qinfo; + + /* The PMSN is updated by the host driver */ + parent_sq_ctx->pmsn_type = HIFC_PMSN_CI_TYPE_FROM_HOST; + + /* Indicates the value of O of the valid SQE in the current round of SQ. + * The value of Linked List SQ is always one, and the value of 0 is + * invalid. + */ + /* current valid o-bit */ + parent_sq_ctx->loop_o = HIFC_OWNER_DRIVER_PRODUCT; + + /* should be opposite from loop_o */ + parent_sq_ctx->cur_wqe_o = ~(parent_sq_ctx->loop_o); + + /* the first sqe's gpa */ + parent_sq_ctx->cur_sqe_gpa = head_wqe_page->wpg_phy_addr; + + /* Indicates the GPA of the Queue header that is initialized to the SQ + * in the Host memory. The value must be 16-byte aligned. + */ + parent_sq_ctx->pmsn_gpa = sq->queue_hdr_phy_addr; + if (wqe_pre_load != 0) + parent_sq_ctx->pmsn_gpa |= HIFC_SQ_LINK_PRE; + + /* + * This field is used to fill in the dmaattr_idx field of the ComboDMA. 
+ * The default value is 0 + */ + parent_sq_ctx->sqe_dmaattr_idx = HIFC_DMA_ATTR_OFST; + + /* + * This field is filled using the value of RO_SO in the SGL0 of + * the ComboDMA + */ + parent_sq_ctx->sq_so_ro = HIFC_PCIE_RELAXED_ORDERING; + + parent_sq_ctx->ring = HIFC_QUEUE_LINK_STYLE; + + /* This field is used to set the SGL0 field of the Child solicDMA */ + parent_sq_ctx->zerocopy_dmaattr_idx = HIFC_DMA_ATTR_OFST; + + parent_sq_ctx->zerocopy_so_ro = HIFC_PCIE_RELAXED_ORDERING; + + /* PCIe attribute information */ + parent_sq_ctx->pcie_template = HIFC_PCIE_TEMPLATE; + + memset(&queue_bus, 0, sizeof(struct hifc_queue_info_bus_s)); + /* bits 20 */ + queue_bus.bus[0] |= ((unsigned long long)(sq->context_id & 0xfffff)); + /* bits 6 */ + queue_bus.bus[0] |= + (((unsigned long long)(parent_sq_ctx->sqe_dmaattr_idx)) << 20); + /* bits 2 */ + queue_bus.bus[0] |= + (((unsigned long long)(parent_sq_ctx->sq_so_ro)) << 26); + /* bits 1 */ + queue_bus.bus[0] |= (((unsigned long long)(parent_sq_ctx->ring)) << 28); + /* bits 6 */ + queue_bus.bus[0] |= + (((unsigned long long)(parent_sq_ctx->zerocopy_dmaattr_idx)) + << 29); + /* bits 2 */ + queue_bus.bus[0] |= + (((unsigned long long)(parent_sq_ctx->zerocopy_so_ro)) << 35); + /* bits 6 */ + queue_bus.bus[0] |= + (((unsigned long long)(parent_sq_ctx->pcie_template)) << 37); + /* bits 21 */ + queue_bus.bus[0] |= + (((unsigned long long)(parent_sq_ctx->pmsn_gpa >> 4)) << 43); + /* bits 39 */ + queue_bus.bus[1] |= + ((unsigned long long)(parent_sq_ctx->pmsn_gpa >> 25)); + /* bits 1 */ + queue_bus.bus[1] |= + (((unsigned long long)(parent_sq_ctx->pmsn_type)) << 39); + + parent_sq_ctx->parity = + hifc_get_parity_value(queue_bus.bus, HIFC_SQC_BUS_ROW, + HIFC_SQC_BUS_COL); + + hifc_cpu_to_big64(parent_sq_ctx, sizeof(struct hifcoe_sq_qinfo_s)); +} + +static void hifc_init_parent_ctx_sqc_qinfo( + void *v_hba, + struct hifc_parent_queue_info_s *v_parent_qinfo) +{ + unsigned int resp_scqn = 0; + struct hifcoe_parent_context_s *ctx = NULL; + struct hifcoe_scq_qinfo_s *resp_parent_scq_ctx = NULL; + struct hifc_queue_info_bus_s queue_bus; + + /* + * Obtains the queue id of the scq returned by the CQM when the SCQ + * is created + */ + resp_scqn = v_parent_qinfo->parent_sts_scq_info.cqm_queue_id; + + /* Obtains the Parent Context address */ + ctx = (struct hifcoe_parent_context_s *) + (v_parent_qinfo->parent_ctx.virt_parent_ctx); + + resp_parent_scq_ctx = &ctx->resp_scq_qinfo; + resp_parent_scq_ctx->hw_scqc_config.info.rq_th2_preld_cache_num = + wqe_pre_load; + resp_parent_scq_ctx->hw_scqc_config.info.rq_th1_preld_cache_num = + wqe_pre_load; + resp_parent_scq_ctx->hw_scqc_config.info.rq_th0_preld_cache_num = + wqe_pre_load; + resp_parent_scq_ctx->hw_scqc_config.info.rq_min_preld_cache_num = + wqe_pre_load; + resp_parent_scq_ctx->hw_scqc_config.info.sq_th2_preld_cache_num = + wqe_pre_load; + resp_parent_scq_ctx->hw_scqc_config.info.sq_th1_preld_cache_num = + wqe_pre_load; + resp_parent_scq_ctx->hw_scqc_config.info.sq_th0_preld_cache_num = + wqe_pre_load; + resp_parent_scq_ctx->hw_scqc_config.info.sq_min_preld_cache_num = + wqe_pre_load; + resp_parent_scq_ctx->hw_scqc_config.info.scq_n = + (unsigned long long)resp_scqn; + resp_parent_scq_ctx->hw_scqc_config.info.parity = 0; + + memset(&queue_bus, 0, sizeof(struct hifc_queue_info_bus_s)); + queue_bus.bus[0] = resp_parent_scq_ctx->hw_scqc_config.pctxt_val1; + resp_parent_scq_ctx->hw_scqc_config.info.parity = + hifc_get_parity_value( + queue_bus.bus, + HIFC_HW_SCQC_BUS_ROW, + HIFC_HW_SCQC_BUS_COL); + + 
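+ /* The SCQ qinfo section is consumed in big endian, so the whole
+ * structure is converted in place once the parity has been filled in.
+ */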
hifc_cpu_to_big64(resp_parent_scq_ctx, + sizeof(struct hifcoe_scq_qinfo_s)); +} + +static void hifc_init_parent_ctx_srq_qinfo( + void *v_hba, + struct hifc_parent_queue_info_s *v_parent_qinfo) +{ + struct hifc_hba_s *hba = NULL; + struct hifcoe_parent_context_s *ctx = NULL; + struct cqm_queue_s *cqm_els_srq = NULL; + struct hifc_parent_sq_info_s *sq = NULL; + struct hifc_queue_info_bus_s queue_bus; + + /* Obtains the SQ address */ + sq = &v_parent_qinfo->parent_sq_info; + + /* Obtains the Parent Context address */ + ctx = (struct hifcoe_parent_context_s *) + (v_parent_qinfo->parent_ctx.virt_parent_ctx); + + hba = (struct hifc_hba_s *)v_hba; + cqm_els_srq = hba->els_srq_info.cqm_srq_info; + + /* Initialize the Parent SRQ INFO used when the ELS is received */ + ctx->els_srq_info.srqc_gpa = cqm_els_srq->q_ctx_paddr >> 4; + + memset(&queue_bus, 0, sizeof(struct hifc_queue_info_bus_s)); + queue_bus.bus[0] = ctx->els_srq_info.srqc_gpa; + ctx->els_srq_info.parity = hifc_get_parity_value( + queue_bus.bus, + HIFC_HW_SRQC_BUS_ROW, + HIFC_HW_SRQC_BUS_COL); + + hifc_cpu_to_big64(&ctx->els_srq_info, + sizeof(struct hifcoe_srq_qinfo_s)); + + ctx->imm_srq_info.srqc_gpa = 0; + sq->srq_ctx_addr = 0; +} + +static void hifc_init_parent_rsvd_qinfo( + struct hifc_parent_queue_info_s *v_parent_qinfo) +{ + struct hifcoe_parent_context_s *ctx = NULL; + struct hifcoe_hw_rsvd_queue_s *hw_rsvd_qinfo = NULL; + unsigned short max_seq = 0; + unsigned int each = 0, seq_index = 0; + + /* Obtains the Parent Context address */ + ctx = (struct hifcoe_parent_context_s *) + (v_parent_qinfo->parent_ctx.virt_parent_ctx); + hw_rsvd_qinfo = (struct hifcoe_hw_rsvd_queue_s *)&ctx->hw_rsvdq; + memset(hw_rsvd_qinfo->seq_id_bitmap, 0, + sizeof(hw_rsvd_qinfo->seq_id_bitmap)); + + max_seq = HIFC_HRQI_SEQ_ID_MAX; + + /* special set for sequence id 0, which is always kept by ucode for + * sending fcp-cmd + */ + hw_rsvd_qinfo->seq_id_bitmap[HIFC_HRQI_SEQ_SEPCIAL_ID] = 1; + seq_index = HIFC_HRQI_SEQ_SEPCIAL_ID - + (max_seq >> HIFC_HRQI_SEQ_INDEX_SHIFT); + + /* Set the unavailable mask to start from max + 1 */ + for (each = (max_seq % HIFC_HRQI_SEQ_INDEX_MAX) + 1; + each < HIFC_HRQI_SEQ_INDEX_MAX; each++) { + hw_rsvd_qinfo->seq_id_bitmap[seq_index] |= 0x1 << each; + } + + hw_rsvd_qinfo->seq_id_bitmap[seq_index] = + cpu_to_be64(hw_rsvd_qinfo->seq_id_bitmap[seq_index]); + + /* sepcial set for sequence id 0 */ + if (seq_index != HIFC_HRQI_SEQ_SEPCIAL_ID) { + hw_rsvd_qinfo->seq_id_bitmap[HIFC_HRQI_SEQ_SEPCIAL_ID] = + cpu_to_be64( + hw_rsvd_qinfo->seq_id_bitmap[HIFC_HRQI_SEQ_SEPCIAL_ID]); + } + + for (each = 0; each < seq_index; each++) + hw_rsvd_qinfo->seq_id_bitmap[each] = HIFC_HRQI_SEQ_INVALID_ID; + + /* no matter what the range of seq id, last_req_seq_id is fixed + * value 0xff + */ + hw_rsvd_qinfo->wd0.last_req_seq_id = HIFC_HRQI_SEQ_ID_MAX; + hw_rsvd_qinfo->wd0.xid = v_parent_qinfo->parent_sq_info.context_id; + + *(unsigned long long *)&hw_rsvd_qinfo->wd0 = + cpu_to_be64(*(unsigned long long *)&hw_rsvd_qinfo->wd0); +} + +static void hifc_init_oqid_in_ctx( + struct hifcoe_parent_context_s *v_parent_ctx, + struct hifc_parent_queue_info_s *v_parent_qinfo) +{ + v_parent_ctx->sw_section.oqid_rd = + cpu_to_be16(v_parent_qinfo->parent_sq_info.oqid_rd); + v_parent_ctx->sw_section.oqid_wr = + cpu_to_be16(v_parent_qinfo->parent_sq_info.oqid_wr); +} + +static void hifc_init_parent_sw_section_info( + void *v_hba, + struct hifc_parent_queue_info_s *v_parent_qinfo) +{ +#define HIFC_VLAN_ENABLE (1) + + unsigned short rport_index; + struct hifc_hba_s *hba 
= NULL; + struct hifcoe_parent_context_s *ctx = NULL; + struct hifcoe_sw_section_s *sw_section = NULL; + + /* Obtains the Parent Context address */ + hba = (struct hifc_hba_s *)v_hba; + ctx = (struct hifcoe_parent_context_s *) + (v_parent_qinfo->parent_ctx.virt_parent_ctx); + sw_section = &ctx->sw_section; + + /* xid+vPortId */ + sw_section->sw_ctxt_vport_xid.xid = + v_parent_qinfo->parent_sq_info.context_id; + sw_section->sw_ctxt_vport_xid.vport = + v_parent_qinfo->parent_sq_info.vport_id; + sw_section->sw_ctxt_vport_xid.csctrl = 0; + hifc_cpu_to_big32(&sw_section->sw_ctxt_vport_xid, + sizeof(sw_section->sw_ctxt_vport_xid)); + + /* conn_id */ + rport_index = HIFC_LSW(v_parent_qinfo->parent_sq_info.rport_index); + sw_section->conn_id = cpu_to_be16(rport_index); + + /* Immediate parameters */ + sw_section->immi_rq_page_size = 0; + + /* Parent SCQ INFO used for sending packets to the Cmnd */ + sw_section->scq_num_rcv_cmd = + cpu_to_be32(v_parent_qinfo->parent_cmd_scq_info.cqm_queue_id); + + /* sw_ctxt_misc */ + sw_section->sw_ctxt_misc.dw.srv_type = + v_parent_qinfo->parent_sq_info.service_type; + sw_section->sw_ctxt_misc.dw.port_id = hba->port_index; + + /* only the VN2VF mode is supported */ + sw_section->sw_ctxt_misc.dw.vlan_id = 0; + hifc_cpu_to_big32(&sw_section->sw_ctxt_misc.pctxt_val0, + sizeof(sw_section->sw_ctxt_misc.pctxt_val0)); + + /* oqid_rd, oqid_wr */ + hifc_init_oqid_in_ctx(ctx, v_parent_qinfo); + + /* Configuring the combo length */ + sw_section->per_xmit_data_size = cpu_to_be32(combo_length_kb * 1024); + + /* sw_ctxt_config */ + sw_section->sw_ctxt_config.dw.work_mode = HIFC_PORT_MODE_INI; + + sw_section->sw_ctxt_config.dw.status = FCOE_PARENT_STATUS_INVALID; + sw_section->sw_ctxt_config.dw.cos = hba->port_index; + sw_section->sw_ctxt_config.dw.oq_cos_cmd = HIFC_PACKET_COS_FC_CMD; + sw_section->sw_ctxt_config.dw.oq_cos_data = + v_parent_qinfo->queue_data_cos; + sw_section->sw_ctxt_config.dw.priority = 0; + sw_section->sw_ctxt_config.dw.vlan_enable = HIFC_VLAN_ENABLE; + sw_section->sw_ctxt_config.dw.sgl_num = dif_sgl_mode; + hifc_cpu_to_big32(&sw_section->sw_ctxt_config.pctxt_val1, + sizeof(sw_section->sw_ctxt_config.pctxt_val1)); + + hifc_cpu_to_big32(&sw_section->immi_dif_info, + sizeof(sw_section->immi_dif_info)); + + sw_section->cmd_scq_gpa_h = + HIFC_HIGH_32_BITS(hba->scq_info[v_parent_qinfo->parent_cmd_scq_info.local_queue_id].cqm_scq_info->q_header_paddr); + sw_section->cmd_scq_gpa_l = + HIFC_LOW_32_BITS(hba->scq_info[v_parent_qinfo->parent_cmd_scq_info.local_queue_id].cqm_scq_info->q_header_paddr); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Port(0x%x) RPort(0x%x) CmdLocalScqn(0x%x) QheaderGpaH(0x%x) QheaderGpaL(0x%x)", + hba->port_cfg.port_id, + v_parent_qinfo->parent_sq_info.rport_index, + v_parent_qinfo->parent_cmd_scq_info.local_queue_id, + sw_section->cmd_scq_gpa_h, + sw_section->cmd_scq_gpa_l); + + hifc_cpu_to_big32(&sw_section->cmd_scq_gpa_h, + sizeof(sw_section->cmd_scq_gpa_h)); + hifc_cpu_to_big32(&sw_section->cmd_scq_gpa_l, + sizeof(sw_section->cmd_scq_gpa_l)); +} + +void hifc_init_parent_ctx(void *v_hba, + struct hifc_parent_queue_info_s *v_parent_qinfo) +{ + struct hifcoe_parent_context_s *ctx = NULL; + + ctx = (struct hifcoe_parent_context_s *) + (v_parent_qinfo->parent_ctx.virt_parent_ctx); + + /* Initialize Parent Context */ + memset(ctx, 0, HIFC_CNTX_SIZE_256B); + + /* Initialize the Queue Info hardware area */ + hifc_init_prnt_ctx_sq_qinfo(v_parent_qinfo); + hifc_init_parent_ctx_sqc_qinfo(v_hba, v_parent_qinfo); + 
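+ /* The ELS SRQ and reserved queue sections below also belong to the
+ * hardware area; each init helper converts its section to big endian.
+ */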
hifc_init_parent_ctx_srq_qinfo(v_hba, v_parent_qinfo); + hifc_init_parent_rsvd_qinfo(v_parent_qinfo); + + /* Initialize Software Section */ + hifc_init_parent_sw_section_info(v_hba, v_parent_qinfo); +} + +unsigned int hifc_get_rport_maped_cmd_scqn(void *phba, unsigned int rport_index) +{ + unsigned int cmd_scqn_local = 0; + struct hifc_hba_s *hba = (struct hifc_hba_s *)phba; + + cmd_scqn_local = HIFC_RPORTID_TO_CMD_SCQN(rport_index); + + return hba->scq_info[cmd_scqn_local].scqn; +} + +/** + * hifc_get_rport_maped_sts_scqn - Obtains the SCQ channel of RPort that is used + * to send STS. + * @v_hba: hba handle + * @rport_index: rport index + * @Return: related scqn value with rport index + */ +unsigned int hifc_get_rport_maped_sts_scqn(void *phba, unsigned int rport_index) +{ + unsigned int sts_scqn_local = 0; + struct hifc_hba_s *hba = (struct hifc_hba_s *)phba; + + sts_scqn_local = HIFC_RPORTID_TO_STS_SCQN(rport_index); + + return hba->scq_info[sts_scqn_local].scqn; +} + +void hifc_map_shared_queue_qid( + struct hifc_hba_s *v_hba, + struct hifc_parent_queue_info_s *v_parent_queue_info, + unsigned int rport_index) +{ + unsigned int cmd_scqn_local = 0; + unsigned int sts_scqn_local = 0; + + /* The SCQ is used for each connection based on the balanced + * distribution of commands and responses + */ + cmd_scqn_local = HIFC_RPORTID_TO_CMD_SCQN(rport_index); + sts_scqn_local = HIFC_RPORTID_TO_STS_SCQN(rport_index); + v_parent_queue_info->parent_cmd_scq_info.local_queue_id = + cmd_scqn_local; + v_parent_queue_info->parent_sts_scq_info.local_queue_id = + sts_scqn_local; + v_parent_queue_info->parent_cmd_scq_info.cqm_queue_id = + v_hba->scq_info[cmd_scqn_local].scqn; + v_parent_queue_info->parent_sts_scq_info.cqm_queue_id = + v_hba->scq_info[sts_scqn_local].scqn; + + /* Each session share with immediate SRQ and ElsSRQ */ + v_parent_queue_info->parent_els_srq_info.local_queue_id = 0; + v_parent_queue_info->parent_els_srq_info.cqm_queue_id = + v_hba->els_srq_info.srqn; + + /* Allocate fcp data cos value */ + v_parent_queue_info->queue_data_cos = hifc_map_fcp_data_cos(v_hba); + + /* Allocate Parent SQ vPort */ + v_parent_queue_info->parent_sq_info.vport_id += + v_parent_queue_info->queue_vport_id; +} + +unsigned int hifc_alloc_parent_resource(void *v_hba, + struct unf_rport_info_s *v_rport_info) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct hifc_hba_s *hba = NULL; + struct hifc_parent_queue_info_s *v_parent_queue_info = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_rport_info, + return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)v_hba; + + if (!hba->parent_queue_mgr) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) cannot find parent queue pool", + hba->port_cfg.port_id); + + return UNF_RETURN_ERROR; + } + + if (v_rport_info->rport_index >= UNF_HIFC_MAXRPORT_NUM) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) allocate parent resource failed, invlaid rport index(0x%x),rport nportid(0x%x)", + hba->port_cfg.port_id, + v_rport_info->rport_index, + v_rport_info->nport_id); + + return UNF_RETURN_ERROR; + } + + v_parent_queue_info = + &hba->parent_queue_mgr->parent_queues[v_rport_info->rport_index]; + + spin_lock_irqsave(&v_parent_queue_info->parent_queue_state_lock, flag); + + if (v_parent_queue_info->offload_state != HIFC_QUEUE_STATE_FREE) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + 
"[err]Port(0x%x) allocate parent resource failed, invlaid rport index(0x%x),rport nportid(0x%x), offload state(0x%x)", + hba->port_cfg.port_id, + v_rport_info->rport_index, + v_rport_info->nport_id, + v_parent_queue_info->offload_state); + + spin_unlock_irqrestore( + &v_parent_queue_info->parent_queue_state_lock, + flag); + return UNF_RETURN_ERROR; + } + + v_parent_queue_info->offload_state = HIFC_QUEUE_STATE_INITIALIZED; + + /* Create Parent Context and Link List SQ */ + ret = hifc_alloc_parent_sq(hba, v_parent_queue_info, v_rport_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "Port(0x%x) alloc sq resoure failed.rport index(0x%x),rport nportid(0x%x).", + hba->port_cfg.port_id, v_rport_info->rport_index, + v_rport_info->nport_id); + + v_parent_queue_info->offload_state = HIFC_QUEUE_STATE_FREE; + hifc_invalid_parent_sq(&v_parent_queue_info->parent_sq_info); + spin_unlock_irqrestore( + &v_parent_queue_info->parent_queue_state_lock, + flag); + + return UNF_RETURN_ERROR; + } + + /* Allocate the corresponding queue xid to each parent */ + hifc_map_shared_queue_qid(hba, v_parent_queue_info, + v_rport_info->rport_index); + + /* Initialize Parent Context, including hardware area and ucode area */ + hifc_init_parent_ctx(v_hba, v_parent_queue_info); + + spin_unlock_irqrestore(&v_parent_queue_info->parent_queue_state_lock, + flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) allocate parent sq success,rport index(0x%x),rport nportid(0x%x),context id(0x%x)", + hba->port_cfg.port_id, + v_rport_info->rport_index, + v_rport_info->nport_id, + v_parent_queue_info->parent_sq_info.context_id); + + return ret; +} + +unsigned int hifc_free_parent_resource(void *v_hba, + struct unf_rport_info_s *v_rport_info) +{ + struct hifc_hba_s *hba = NULL; + struct hifc_parent_queue_info_s *v_parent_queue_info = NULL; + unsigned long flag = 0; + unsigned long rst_flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + enum hifc_session_reset_mode_e mode = + HIFC_SESS_RST_DELETE_IO_CONN_BOTH; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_rport_info, + return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)v_hba; + if (!hba->parent_queue_mgr) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[warn]Port(0x%x) cannot find parent queue pool", + hba->port_cfg.port_id); + + return UNF_RETURN_ERROR; + } + + /* get parent queue info (by rport index) */ + if (v_rport_info->rport_index >= UNF_HIFC_MAXRPORT_NUM) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[warn]Port(0x%x) free parent resource failed, invlaid rport_index(%u) rport_nport_id(0x%x)", + hba->port_cfg.port_id, + v_rport_info->rport_index, + v_rport_info->nport_id); + + return UNF_RETURN_ERROR; + } + v_parent_queue_info = &hba->parent_queue_mgr->parent_queues[v_rport_info->rport_index]; + + spin_lock_irqsave(&v_parent_queue_info->parent_queue_state_lock, flag); + + /* 1. 
for has been offload */ + if (v_parent_queue_info->offload_state == HIFC_QUEUE_STATE_OFFLOADED) { + v_parent_queue_info->offload_state = + HIFC_QUEUE_STATE_DESTROYING; + spin_unlock_irqrestore( + &v_parent_queue_info->parent_queue_state_lock, + flag); + + /* set reset state, in order to prevent I/O in_SQ */ + spin_lock_irqsave( + &v_parent_queue_info->parent_sq_info.parent_sq_enqueue_lock, + rst_flag); + v_parent_queue_info->parent_sq_info.sq_in_sess_rst = UNF_TRUE; + spin_unlock_irqrestore( + &v_parent_queue_info->parent_sq_info.parent_sq_enqueue_lock, + rst_flag); + + /* check pcie device state */ + if (HIFC_HBA_NOT_PRESENT(hba)) { + HIFC_TRACE( + UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port(0x%x) hba is not present, free directly. rport_index(0x%x:0x%x) local_nportid(0x%x) remote_nportid(0x%x:0x%x)", + hba->port_cfg.port_id, + v_rport_info->rport_index, + v_parent_queue_info->parent_sq_info.rport_index, + v_parent_queue_info->parent_sq_info.local_port_id, + v_rport_info->nport_id, + v_parent_queue_info->parent_sq_info.remote_port_id); + + hifc_free_parent_queue_info(hba, v_parent_queue_info); + return RETURN_OK; + } + + v_parent_queue_info->parent_sq_info.del_start_jiff = jiffies; + (void)queue_delayed_work( + hba->work_queue, + &v_parent_queue_info->parent_sq_info.del_work, + (unsigned long) + msecs_to_jiffies((unsigned int) + HIFC_SQ_DEL_STAGE_TIMEOUT_MS)); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) begin to reset parent session, rport_index(0x%x:0x%x) local_nportid(0x%x) remote_nportid(0x%x:0x%x)", + hba->port_cfg.port_id, + v_rport_info->rport_index, + v_parent_queue_info->parent_sq_info.rport_index, + v_parent_queue_info->parent_sq_info.local_port_id, + v_rport_info->nport_id, + v_parent_queue_info->parent_sq_info.remote_port_id); + + /* Forcibly set both mode */ + mode = HIFC_SESS_RST_DELETE_IO_CONN_BOTH; + ret = hifc_send_session_rst_cmd(v_hba, v_parent_queue_info, + mode); + + return ret; + } else if (v_parent_queue_info->offload_state == + HIFC_QUEUE_STATE_INITIALIZED) { + /* 2. for resource has been alloc, but not offload */ + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) parent sq is not offloaded, free directly. rport_index(0x%x:0x%x) local_nportid(0x%x) remote_nportid(0x%x:0x%x)", + hba->port_cfg.port_id, + v_rport_info->rport_index, + v_parent_queue_info->parent_sq_info.rport_index, + v_parent_queue_info->parent_sq_info.local_port_id, + v_rport_info->nport_id, + v_parent_queue_info->parent_sq_info.remote_port_id); + + spin_unlock_irqrestore( + &v_parent_queue_info->parent_queue_state_lock, + flag); + hifc_free_parent_queue_info(hba, v_parent_queue_info); + + return RETURN_OK; + } else if (v_parent_queue_info->offload_state == + HIFC_QUEUE_STATE_OFFLOADING) { + /* 3. for driver has offloading CMND to uCode */ + hifc_push_destroy_parent_queue_sqe(v_hba, + v_parent_queue_info, + v_rport_info); + spin_unlock_irqrestore( + &v_parent_queue_info->parent_queue_state_lock, + flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) parent sq is offloading, push to delay free. 
rport_index(0x%x:0x%x) local_nportid(0x%x) remote_nportid(0x%x:0x%x)", + hba->port_cfg.port_id, + v_rport_info->rport_index, + v_parent_queue_info->parent_sq_info.rport_index, + v_parent_queue_info->parent_sq_info.local_port_id, + v_rport_info->nport_id, + v_parent_queue_info->parent_sq_info.remote_port_id); + + return RETURN_OK; + } else { + /* other state */ + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) parent sq is not created, do not need free state(0x%x) rport_index(0x%x:0x%x) local_nportid(0x%x) remote_nportid(0x%x:0x%x)", + hba->port_cfg.port_id, + v_parent_queue_info->offload_state, + v_rport_info->rport_index, + v_parent_queue_info->parent_sq_info.rport_index, + v_parent_queue_info->parent_sq_info.local_port_id, + v_rport_info->nport_id, + v_parent_queue_info->parent_sq_info.remote_port_id); + + spin_unlock_irqrestore( + &v_parent_queue_info->parent_queue_state_lock, + flag); + + return RETURN_OK; + } +} + +void hifc_free_parent_queue_mgr(void *v_hba) +{ + struct hifc_hba_s *hba = NULL; + unsigned int index = 0; + struct hifc_parent_queue_mgr_s *parent_queue_mgr; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return); + hba = (struct hifc_hba_s *)v_hba; + + if (!hba->parent_queue_mgr) + return; + parent_queue_mgr = hba->parent_queue_mgr; + + for (index = 0; index < UNF_HIFC_MAXRPORT_NUM; index++) { + if (parent_queue_mgr->parent_queues[index].parent_ctx.virt_parent_ctx) + parent_queue_mgr->parent_queues[index].parent_ctx.virt_parent_ctx = NULL; + } + + if (parent_queue_mgr->parent_sq_buf_list.buflist) { + for (index = 0; + index < parent_queue_mgr->parent_sq_buf_list.buf_num; + index++) { + if (parent_queue_mgr->parent_sq_buf_list.buflist[index].paddr != 0) { + pci_unmap_single( + hba->pci_dev, + parent_queue_mgr->parent_sq_buf_list.buflist[index].paddr, + parent_queue_mgr->parent_sq_buf_list.buf_size, + DMA_BIDIRECTIONAL); + parent_queue_mgr->parent_sq_buf_list.buflist[index].paddr = 0; + } + if (parent_queue_mgr->parent_sq_buf_list.buflist[index].vaddr) { + kfree(parent_queue_mgr->parent_sq_buf_list.buflist[index].vaddr); + parent_queue_mgr->parent_sq_buf_list.buflist[index].vaddr = NULL; + } + } + + kfree(parent_queue_mgr->parent_sq_buf_list.buflist); + parent_queue_mgr->parent_sq_buf_list.buflist = NULL; + } + + vfree(parent_queue_mgr); + hba->parent_queue_mgr = NULL; +} + +void hifc_free_parent_queues(void *v_hba) +{ + unsigned int index = 0; + unsigned long flag = 0; + struct hifc_parent_queue_mgr_s *parent_queue_mgr = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return); + hba = (struct hifc_hba_s *)v_hba; + parent_queue_mgr = hba->parent_queue_mgr; + + for (index = 0; index < UNF_HIFC_MAXRPORT_NUM; index++) { + spin_lock_irqsave(&parent_queue_mgr->parent_queues[index].parent_queue_state_lock, flag); + + if (parent_queue_mgr->parent_queues[index].offload_state == + HIFC_QUEUE_STATE_DESTROYING) { + spin_unlock_irqrestore( + &parent_queue_mgr->parent_queues[index].parent_queue_state_lock, + flag); + + (void)cancel_delayed_work_sync(&parent_queue_mgr->parent_queues[index].parent_sq_info.del_work); + (void)cancel_delayed_work_sync(&parent_queue_mgr->parent_queues[index].parent_sq_info.flush_done_tmo_work); + + /* free parent queue */ + hifc_free_parent_queue_info( + hba, + &parent_queue_mgr->parent_queues[index]); + continue; + } + + spin_unlock_irqrestore(&parent_queue_mgr->parent_queues[index].parent_queue_state_lock, flag); + } +} + +unsigned int hifc_alloc_parent_queue_mgr(void 
*v_hba) +{ + unsigned int index = 0; + struct hifc_parent_queue_mgr_s *parent_queue_mgr = NULL; + struct hifc_hba_s *hba = NULL; + unsigned int buf_total_size; + unsigned int buf_num; + unsigned int alloc_idx; + unsigned int cur_buf_idx = 0; + unsigned int cur_buf_offset = 0; + unsigned int uiprtctxsize = sizeof(struct hifcoe_parent_context_s); + unsigned int buf_cnt_perhugebuf; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, + return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)v_hba; + + parent_queue_mgr = (struct hifc_parent_queue_mgr_s *)vmalloc( + sizeof(struct hifc_parent_queue_mgr_s)); + if (!parent_queue_mgr) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) cannot allocate queue manager", + hba->port_cfg.port_id); + + return UNF_RETURN_ERROR; + } + + hba->parent_queue_mgr = parent_queue_mgr; + memset(parent_queue_mgr, 0, sizeof(struct hifc_parent_queue_mgr_s)); + + for (index = 0; index < UNF_HIFC_MAXRPORT_NUM; index++) { + spin_lock_init(&parent_queue_mgr->parent_queues[index].parent_queue_state_lock); + parent_queue_mgr->parent_queues[index].offload_state = + HIFC_QUEUE_STATE_FREE; + parent_queue_mgr->parent_queues[index].parent_sq_info.queue_header_original = NULL; + spin_lock_init(&parent_queue_mgr->parent_queues[index].parent_sq_info.parent_sq_enqueue_lock); + parent_queue_mgr->parent_queues[index].parent_cmd_scq_info.cqm_queue_id = INVALID_VALUE32; + parent_queue_mgr->parent_queues[index].parent_sts_scq_info.cqm_queue_id = INVALID_VALUE32; + parent_queue_mgr->parent_queues[index].parent_els_srq_info.cqm_queue_id = INVALID_VALUE32; + parent_queue_mgr->parent_queues[index].parent_sq_info.del_start_jiff = INVALID_VALUE64; + parent_queue_mgr->parent_queues[index].queue_vport_id = + hba->vpid_start; + } + + buf_total_size = uiprtctxsize * UNF_HIFC_MAXRPORT_NUM; + parent_queue_mgr->parent_sq_buf_list.buf_size = + buf_total_size > BUF_LIST_PAGE_SIZE ? BUF_LIST_PAGE_SIZE : + buf_total_size; + buf_cnt_perhugebuf = + parent_queue_mgr->parent_sq_buf_list.buf_size / uiprtctxsize; + buf_num = + UNF_HIFC_MAXRPORT_NUM % buf_cnt_perhugebuf ? 
+ UNF_HIFC_MAXRPORT_NUM / buf_cnt_perhugebuf + 1 : + UNF_HIFC_MAXRPORT_NUM / buf_cnt_perhugebuf; + parent_queue_mgr->parent_sq_buf_list.buflist = (struct buff_list_s *) + kmalloc(buf_num * sizeof(struct buff_list_s), + GFP_KERNEL); + parent_queue_mgr->parent_sq_buf_list.buf_num = buf_num; + + if (!parent_queue_mgr->parent_sq_buf_list.buflist) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Allocate QueuMgr buf list failed out of memory"); + goto free_parent_queue; + } + memset(parent_queue_mgr->parent_sq_buf_list.buflist, 0, + buf_num * sizeof(struct buff_list_s)); + + for (alloc_idx = 0; alloc_idx < buf_num; alloc_idx++) { + parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].vaddr = + kmalloc(parent_queue_mgr->parent_sq_buf_list.buf_size, + GFP_KERNEL); + if (!parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].vaddr) + goto free_parent_queue; + memset( + parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].vaddr, + 0, parent_queue_mgr->parent_sq_buf_list.buf_size); + + parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].paddr = + pci_map_single( + hba->pci_dev, + parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].vaddr, + parent_queue_mgr->parent_sq_buf_list.buf_size, + DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error( + hba->pci_dev, + parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].paddr)) { + parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].paddr = 0; + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, "[err]Map QueuMgr address failed"); + + goto free_parent_queue; + } + } + + for (index = 0; index < UNF_HIFC_MAXRPORT_NUM; index++) { + cur_buf_idx = index / buf_cnt_perhugebuf; + cur_buf_offset = uiprtctxsize * (index % buf_cnt_perhugebuf); + + parent_queue_mgr->parent_queues[index].parent_ctx.virt_parent_ctx = parent_queue_mgr->parent_sq_buf_list.buflist[cur_buf_idx].vaddr + cur_buf_offset; + parent_queue_mgr->parent_queues[index].parent_ctx.parent_ctx = parent_queue_mgr->parent_sq_buf_list.buflist[cur_buf_idx].paddr + cur_buf_offset; + } + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[EVENT]Allocate bufnum:%u,buf_total_size:%u", buf_num, + buf_total_size); + + return RETURN_OK; + +free_parent_queue: + hifc_free_parent_queue_mgr(hba); + return UNF_RETURN_ERROR; +} + +static void hifc_release_all_wqe_pages(struct hifc_hba_s *v_hba) +{ + unsigned int index; + struct hifc_sq_wqe_page_s *wpg = NULL; + + UNF_CHECK_VALID(0x2218, UNF_TRUE, v_hba, return); + + wpg = v_hba->sq_wpg_pool.wpg_pool_addr; + + for (index = 0; index < v_hba->sq_wpg_pool.wpg_cnt; index++) { + if (wpg->wpg_addr) { + dma_pool_free(v_hba->sq_wpg_pool.wpg_dma_pool, + wpg->wpg_addr, wpg->wpg_phy_addr); + wpg->wpg_addr = NULL; + wpg->wpg_phy_addr = 0; + } + + wpg++; + } + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port[%u] free total %u wqepages", v_hba->port_index, + index); +} + +unsigned int hifc_alloc_parent_sq_wqe_page_pool(void *v_hba) +{ + unsigned int index = 0; + struct hifc_sq_wqe_page_pool_s *wpg_pool = NULL; + struct hifc_sq_wqe_page_s *wpg = NULL; + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_hba; + wpg_pool = &hba->sq_wpg_pool; + + INIT_LIST_HEAD(&wpg_pool->list_free_wpg_pool); + spin_lock_init(&wpg_pool->wpg_pool_lock); + atomic_set(&wpg_pool->wpg_in_use, 0); + + /* Calculate the number of Wqe Page required in the pool */ + wpg_pool->wpg_size = wqe_page_size; + wpg_pool->wpg_cnt = (HIFC_MIN_WP_NUM * hba->image_count + + ((hba->exit_count * HIFC_SQE_SIZE) / + 
wpg_pool->wpg_size));
+
+ wpg_pool->wqe_per_wpg = wpg_pool->wpg_size / HIFC_SQE_SIZE;
+
+ /* Create DMA pool */
+ wpg_pool->wpg_dma_pool = dma_pool_create("hifc_wpg_pool",
+ &hba->pci_dev->dev,
+ wpg_pool->wpg_size,
+ HIFC_SQE_SIZE, 0);
+ if (!wpg_pool->wpg_dma_pool) {
+ HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
+ "[err]Cannot allocate SQ WqePage DMA pool");
+
+ goto out_create_dma_pool_err;
+ }
+
+ /* Allocate arrays to record all WqePage addresses */
+ wpg_pool->wpg_pool_addr =
+ (struct hifc_sq_wqe_page_s *)
+ vmalloc(wpg_pool->wpg_cnt * sizeof(struct hifc_sq_wqe_page_s));
+ if (!wpg_pool->wpg_pool_addr) {
+ HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
+ "[err]Allocate SQ WqePageAddr array failed");
+
+ goto out_alloc_wpg_array_err;
+ }
+ wpg = wpg_pool->wpg_pool_addr;
+ memset(wpg, 0, wpg_pool->wpg_cnt * sizeof(struct hifc_sq_wqe_page_s));
+
+ for (index = 0; index < wpg_pool->wpg_cnt; index++) {
+ /* Apply for WqePage from DMA POOL */
+ wpg->wpg_addr = dma_pool_alloc(wpg_pool->wpg_dma_pool,
+ GFP_KERNEL,
+ (u64 *)&wpg->wpg_phy_addr);
+ if (!wpg->wpg_addr) {
+ HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT,
+ UNF_ERR, "[err]DMA pool allocation failed");
+
+ break;
+ }
+
+ /* To ensure security, clear the memory */
+ memset(wpg->wpg_addr, 0, wpg_pool->wpg_size);
+
+ /* Add to the idle linked list */
+ INIT_LIST_HEAD(&wpg->entry_wpg);
+ list_add_tail(&wpg->entry_wpg,
+ &wpg_pool->list_free_wpg_pool);
+
+ wpg++;
+ }
+ /* ALL allocated successfully */
+ if (index == wpg_pool->wpg_cnt)
+ return RETURN_OK;
+
+ hifc_release_all_wqe_pages(hba);
+ vfree(wpg_pool->wpg_pool_addr);
+ wpg_pool->wpg_pool_addr = NULL;
+
+out_alloc_wpg_array_err:
+ dma_pool_destroy(wpg_pool->wpg_dma_pool);
+ wpg_pool->wpg_dma_pool = NULL;
+
+out_create_dma_pool_err:
+ return UNF_RETURN_ERROR;
+}
+
+void hifc_free_parent_sq_wqe_page_pool(void *v_hba)
+{
+ struct hifc_hba_s *hba = NULL;
+
+ UNF_CHECK_VALID(0x2220, UNF_TRUE, v_hba, return);
+ hba = (struct hifc_hba_s *)v_hba;
+
+ hifc_release_all_wqe_pages(hba);
+ hba->sq_wpg_pool.wpg_cnt = 0;
+
+ if (hba->sq_wpg_pool.wpg_pool_addr) {
+ vfree(hba->sq_wpg_pool.wpg_pool_addr);
+ hba->sq_wpg_pool.wpg_pool_addr = NULL;
+ }
+
+ if (hba->sq_wpg_pool.wpg_dma_pool) {
+ dma_pool_destroy(hba->sq_wpg_pool.wpg_dma_pool);
+ hba->sq_wpg_pool.wpg_dma_pool = NULL;
+ }
+}
+
+static inline void hifc_set_sq_wqe_owner_be(void *v_sqe)
+{
+ unsigned int *sqe_dw = (unsigned int *)v_sqe;
+
+ /* Ensure that the write of WQE is complete */
+ mb();
+ sqe_dw[HIFC_SQE_SECOND_OBIT_DW_POS] |= HIFC_SQE_OBIT_SET_MASK_BE;
+
+ /* Ensure that the write of Second Obit is complete */
+ mb();
+ sqe_dw[HIFC_SQE_FIRST_OBIT_DW_POS] |= HIFC_SQE_OBIT_SET_MASK_BE;
+}
+
+static void hifc_free_head_wqe_page(struct hifc_parent_sq_info_s *v_sq)
+{
+ struct hifc_hba_s *hba = NULL;
+ struct hifc_sq_wqe_page_s *sq_wpg = NULL;
+ struct list_head *entry_head_wqe_page = NULL;
+ unsigned long flag = 0;
+
+ atomic_dec(&v_sq->wqe_page_cnt);
+
+ hba = (struct hifc_hba_s *)v_sq->phba;
+ sq_wpg = HIFC_GET_SQ_HEAD(v_sq);
+ memset((void *)sq_wpg->wpg_addr, WQE_MARKER_0,
+ hba->sq_wpg_pool.wpg_size);
+
+ spin_lock_irqsave(&hba->sq_wpg_pool.wpg_pool_lock, flag);
+ entry_head_wqe_page = &sq_wpg->entry_wpg;
+ list_del(entry_head_wqe_page);
+ list_add_tail(entry_head_wqe_page,
+ &hba->sq_wpg_pool.list_free_wpg_pool);
+
+ /* WqePage Pool counter */
+ atomic_dec(&hba->sq_wpg_pool.wpg_in_use);
+
+ spin_unlock_irqrestore(&hba->sq_wpg_pool.wpg_pool_lock, flag);
+}
+
+static unsigned int 
hifc_parent_sq_ring_door_bell( + struct hifc_parent_sq_info_s *v_sq) +{ + unsigned int ret = RETURN_OK; + int ravl; + unsigned short pmsn; + unsigned char pmsn_lo; + unsigned char pmsn_hi; + unsigned long long db_val_qw; + struct hifc_hba_s *hba; + struct hifc_parent_sq_db_s door_bell; + + hba = (struct hifc_hba_s *)v_sq->phba; + pmsn = v_sq->last_pmsn; + /* Obtain the low 8 Bit of PMSN */ + pmsn_lo = (unsigned char)(pmsn & 0xFF); + /* Obtain the high 8 Bit of PMSN */ + pmsn_hi = (unsigned char)((pmsn >> 8) & 0xFF); + door_bell.wd0.service_type = HIFC_LSW(v_sq->service_type); + door_bell.wd0.cos = hba->port_index; + door_bell.wd0.c = 0; + door_bell.wd0.arm = HIFC_DB_ARM_DISABLE; + door_bell.wd0.cntx_size = HIFC_CNTX_SIZE_T_256B; + door_bell.wd0.vport = v_sq->vport_id; + door_bell.wd0.xid = v_sq->context_id; + door_bell.wd1.sm_data = v_sq->cache_id; + door_bell.wd1.qid = v_sq->sq_queue_id; + door_bell.wd1.pi_hi = (unsigned int)pmsn_hi; + + if (unlikely(v_sq->cache_id == INVALID_VALUE32)) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) SQ(0x%x) send DB error invalid cachedid", + hba->port_cfg.port_id, v_sq->context_id); + HIFC_HBA_STAT(hba, HIFC_STAT_PARENT_SQ_INVALID_CACHED_ID); + return UNF_RETURN_ERROR; + } + + /* Fill Doorbell Record */ + db_val_qw = v_sq->queue_header->doorbell_record; + db_val_qw &= (unsigned long long)(~(0xFFFFFFFF)); + db_val_qw |= (unsigned long long)((unsigned long long)pmsn << 16 | + pmsn); + v_sq->queue_header->doorbell_record = cpu_to_be64(db_val_qw); + + /* ring doorbell */ + db_val_qw = *(unsigned long long *)&door_bell; + hifc_cpu_to_big32(&db_val_qw, sizeof(db_val_qw)); + + ravl = cqm_ring_hardware_db(hba->hw_dev_handle, SERVICE_T_FC, pmsn_lo, + db_val_qw); + if (unlikely(ravl != CQM_SUCCESS)) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]SQ(0x%x) send DB(0x%llx) failed", + v_sq->context_id, db_val_qw); + + ret = UNF_RETURN_ERROR; + } + + /* Doorbell success counter */ + atomic_inc(&v_sq->sq_dbl_cnt); + + return ret; +} + +unsigned int hifc_parent_sq_enqueue(struct hifc_parent_sq_info_s *v_sq, + struct hifcoe_sqe_s *v_io_sqe) +{ + unsigned char wqe_type = 0; + unsigned int ret = RETURN_OK; + unsigned int addr_wd = INVALID_VALUE32; + unsigned int msn_wd = INVALID_VALUE32; + unsigned short link_wqe_msn = 0; + unsigned long flag = 0; + struct hifc_sq_wqe_page_s *new_wqe_page = NULL; + struct hifc_sq_wqe_page_s *tail_wpg = NULL; + struct hifcoe_sqe_s *sqe_in_wp = NULL; + struct hifc_link_wqe_s *link_wqe = NULL; + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_sq->phba; + + wqe_type = (unsigned char)HIFC_GET_WQE_TYPE(v_io_sqe); + + /* Serial enqueue */ + spin_lock_irqsave(&v_sq->parent_sq_enqueue_lock, flag); + + /* If the SQ is invalid, the wqe is discarded */ + if (unlikely(!atomic_read(&v_sq->sq_valid))) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]SQ is invalid, reject wqe(0x%x)", wqe_type); + + spin_unlock_irqrestore(&v_sq->parent_sq_enqueue_lock, flag); + + return UNF_RETURN_ERROR; + } + + /* + * The heartbeat detection status is 0, which allows control sessions + * enqueuing + */ + if (unlikely((!hba->heart_status) && HIFC_WQE_IS_IO(v_io_sqe))) { + spin_unlock_irqrestore(&v_sq->parent_sq_enqueue_lock, flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[err]Heart status is false"); + + return UNF_RETURN_ERROR; + } + + /* Ensure to be offloaded */ + if (unlikely(atomic_read(&v_sq->sq_cashed) != UNF_TRUE)) { + HIFC_ERR_IO_STAT((struct hifc_hba_s *)v_sq->phba, 
wqe_type); + HIFC_HBA_STAT((struct hifc_hba_s *)v_sq->phba, + HIFC_STAT_PARENT_SQ_NOT_OFFLOADED); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[err]RPort(0x%x) Sq(0x%x) is not offloaded, reject wqe(0x%x)", + v_sq->rport_index, v_sq->context_id, wqe_type); + + spin_unlock_irqrestore(&v_sq->parent_sq_enqueue_lock, flag); + + return UNF_RETURN_ERROR; + } + + /* + * Whether the SQ is in the flush state. Temporarily allow the control + * sessions to enqueue. + */ + if (unlikely(v_sq->port_in_flush && HIFC_WQE_IS_IO(v_io_sqe))) { + HIFC_ERR_IO_STAT((struct hifc_hba_s *)v_sq->phba, wqe_type); + HIFC_HBA_STAT((struct hifc_hba_s *)v_sq->phba, + HIFC_STAT_PARENT_IO_FLUSHED); + + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]SQ(0x%x) in flush, cmsn(0x%x)-pmsn(0x%x), reject wqe(0x%x)", + v_sq->context_id, + HIFC_GET_QUEUE_CMSN(v_sq), + v_sq->last_pmsn, wqe_type); + + spin_unlock_irqrestore(&v_sq->parent_sq_enqueue_lock, flag); + + return UNF_RETURN_ERROR; + } + + /* + * If the SQ is in the Seesion deletion state and is the WQE of the + * I/O path, the I/O failure is directly returned + */ + if (unlikely(v_sq->sq_in_sess_rst && HIFC_WQE_IS_IO(v_io_sqe))) { + HIFC_ERR_IO_STAT((struct hifc_hba_s *)v_sq->phba, wqe_type); + HIFC_HBA_STAT((struct hifc_hba_s *)v_sq->phba, + HIFC_STAT_PARENT_IO_FLUSHED); + + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]SQ(0x%x) in session reset, reject wqe(0x%x)", + v_sq->context_id, wqe_type); + + spin_unlock_irqrestore(&v_sq->parent_sq_enqueue_lock, flag); + + return UNF_RETURN_ERROR; + } + + /* + * The PMSN position of the SQE that can be put into the SQE is LinkWqe. + * Apply to the CQM for a new page + */ + tail_wpg = HIFC_GET_SQ_TAIL(v_sq); + + if (v_sq->wqe_offset == v_sq->wqe_num_per_buf) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_INFO, + "[info]RPort(0x%x) Sq(0x%x) add wqepage at pmsn(0x%x), WpgCnt(0x%x)", + v_sq->rport_index, v_sq->context_id, v_sq->last_pmsn, + atomic_read(&v_sq->wqe_page_cnt)); + + /* Add a new Wqe Page */ + new_wqe_page = hifc_add_one_wqe_page(v_sq); + if (unlikely(!new_wqe_page)) { + HIFC_ERR_IO_STAT((struct hifc_hba_s *)v_sq->phba, + wqe_type); + spin_unlock_irqrestore(&v_sq->parent_sq_enqueue_lock, + flag); + + return UNF_RETURN_ERROR; + } + + /* + * Set the next address of LinkWqe to the newly applied WqePage + */ + link_wqe = (struct hifc_link_wqe_s *) + hifc_get_wqe_page_entry(tail_wpg, v_sq->wqe_offset); + addr_wd = HIFC_MSD(new_wqe_page->wpg_phy_addr); + link_wqe->next_page_addr_hi = cpu_to_be32(addr_wd); + addr_wd = HIFC_LSD(new_wqe_page->wpg_phy_addr); + link_wqe->next_page_addr_lo = cpu_to_be32(addr_wd); + + /* Fill LinkWqe msn */ + link_wqe_msn = HIFC_MSN_DEC(v_sq->last_pmsn); + msn_wd = be32_to_cpu(link_wqe->val_wd1); + msn_wd |= ((unsigned int)(link_wqe_msn & 0xffff)); + msn_wd |= (((unsigned int)(link_wqe_msn & 0x7fff)) << 16); + link_wqe->val_wd1 = cpu_to_be32(msn_wd); + + /* Set LinkWqe's Owner Bit valid */ + hifc_set_sq_wqe_owner_be(link_wqe); + + /* The newly added WqePage starts from 0 */ + v_sq->wqe_offset = 0; + + /* Point to the tail, Link Wqe */ + tail_wpg = HIFC_GET_SQ_TAIL(v_sq); + + /* Update counter */ + atomic_inc(&v_sq->wqe_page_cnt); + } + + /* Set pmsn of WQE Control Section, and set Owner-Bit invalid */ + hifc_build_wqe_owner_pmsn(&v_io_sqe->ctrl_sl, !v_sq->last_pi_owner, + v_sq->last_pmsn); + + /* Port WQE send counter */ + HIFC_IO_STAT((struct hifc_hba_s *)v_sq->phba, wqe_type); + + /* + * Set Done Bit of WQE, convert Control and Task 
Section to big endian + */ + hifc_convert_parent_wqe_to_big_endian(v_io_sqe); + + /* + * Find the position of the pointer that the SQE is placed in the + * WQEPAGE + */ + sqe_in_wp = (struct hifcoe_sqe_s *) + hifc_get_wqe_page_entry(tail_wpg, v_sq->wqe_offset); + + /* Copy sqe from the local memory to WqePage */ + memcpy(sqe_in_wp, v_io_sqe, sizeof(struct hifcoe_sqe_s)); + + hifc_set_sq_wqe_owner_be(sqe_in_wp); + + /* ring DoorBell */ + ret = hifc_parent_sq_ring_door_bell(v_sq); + if (unlikely(ret != RETURN_OK)) + HIFC_ERR_IO_STAT((struct hifc_hba_s *)v_sq->phba, wqe_type); + + /* Update the count of the next SQE enqueuing */ + v_sq->wqe_offset += 1; + v_sq->last_pmsn = HIFC_MSN_INC(v_sq->last_pmsn); + + /* sq_wqe_cnt is updated for SQ statistics */ + atomic_inc(&v_sq->sq_wqe_cnt); + atomic_inc(&v_sq->sqe_minus_cqe_cnt); + HIFC_SQ_IO_STAT(v_sq, wqe_type); + spin_unlock_irqrestore(&v_sq->parent_sq_enqueue_lock, flag); + + return ret; +} + +static int hifc_msn_in_wqe_page(unsigned int start_msn, unsigned int end_msn, + unsigned int cur_msn) +{ + int ret = UNF_TRUE; + + if (end_msn >= start_msn) { + if ((cur_msn < start_msn) || (cur_msn > end_msn)) + ret = UNF_FALSE; + else + ret = UNF_TRUE; + + } else { + if ((cur_msn > end_msn) && (cur_msn < start_msn)) + ret = UNF_FALSE; + else + ret = UNF_TRUE; + } + + return ret; +} + +void hifc_free_sq_wqe_page(struct hifc_parent_sq_info_s *v_sq, + unsigned int cur_msn) +{ + unsigned short wpg_start_cmsn = 0; + unsigned short wpg_end_cmsn = 0; + int wqe_page_in_use; + + /* If there is only zero or one Wqe Page, no release is required */ + if (atomic_read(&v_sq->wqe_page_cnt) <= HIFC_MIN_WP_NUM) + return; + + /* + * Check whether the current MSN is within the MSN range covered + * by the WqePage + */ + wpg_start_cmsn = v_sq->head_start_cmsn; + wpg_end_cmsn = v_sq->head_end_cmsn; + wqe_page_in_use = hifc_msn_in_wqe_page(wpg_start_cmsn, + wpg_end_cmsn, cur_msn); + + /* + * If the value of CMSN is within the current Wqe Page, no release is + * required + */ + if (wqe_page_in_use == UNF_TRUE) + return; + /* Free WqePage */ + hifc_free_head_wqe_page(v_sq); + + /* Obtain the start MSN of the next WqePage */ + wpg_start_cmsn = HIFC_MSN_INC(wpg_end_cmsn); + + /* obtain the end MSN of the next WqePage */ + wpg_end_cmsn = HIFC_GET_WP_END_CMSN(wpg_start_cmsn, + v_sq->wqe_num_per_buf); + + /* Set new MSN range */ + v_sq->head_start_cmsn = wpg_start_cmsn; + v_sq->head_end_cmsn = wpg_end_cmsn; +} + +static void hifc_update_sq_wqe_completion_stat( + struct hifc_parent_sq_info_s *v_sq, + union hifcoe_scqe_u *v_scqe) +{ + struct hifcoe_scqe_rcv_els_gs_rsp_s *els_gs_rsp = NULL; + + els_gs_rsp = (struct hifcoe_scqe_rcv_els_gs_rsp_s *)v_scqe; + + /* + * For the ELS/GS RSP intermediate frame and the CQE that is more + * than the ELS_GS_RSP_EXCH_CHECK_FAIL, no statistics are required + */ + if (unlikely(HIFC_GET_SCQE_TYPE(v_scqe) == HIFC_SCQE_ELS_RSP) || + (HIFC_GET_SCQE_TYPE(v_scqe) == HIFC_SCQE_GS_RSP)) { + if (!els_gs_rsp->wd3.end_rsp || !HIFC_SCQE_ERR_TO_CM(v_scqe)) + return; + } + + /* + * When the SQ statistics are updated, the PlogiAcc or PlogiAccSts + * that is implicitly unloaded will enter here, and one more CQE count + * is added + */ + atomic_inc(&v_sq->sq_cqe_cnt); + atomic_dec(&v_sq->sqe_minus_cqe_cnt); + HIFC_SQ_IO_STAT(v_sq, HIFC_GET_SCQE_TYPE(v_scqe)); +} + +unsigned int hifc_reclaim_sq_wqe_page(void *v_hba, union hifcoe_scqe_u *v_scqe) +{ + unsigned int cur_msn = 0; + unsigned int rport_index = INVALID_VALUE32; + struct hifc_hba_s *hba = NULL; + struct 
hifc_parent_sq_info_s *sq = NULL; + struct hifc_parent_queue_info_s *v_parent_queue_info = NULL; + unsigned long state_lock_flag = 0; + + hba = (struct hifc_hba_s *)v_hba; + rport_index = HIFC_GET_SCQE_CONN_ID(v_scqe); + if (rport_index >= UNF_HIFC_MAXRPORT_NUM) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) do not have rport index: 0x%x", + hba->port_cfg.port_id, rport_index); + + return UNF_RETURN_ERROR; + } + + v_parent_queue_info = + &hba->parent_queue_mgr->parent_queues[rport_index]; + sq = &v_parent_queue_info->parent_sq_info; + /* If there is only zero or one Wqe Page, no release is required */ + if (atomic_read(&sq->wqe_page_cnt) <= HIFC_MIN_WP_NUM) { + hifc_update_sq_wqe_completion_stat(sq, v_scqe); + return RETURN_OK; + } else { + spin_lock_irqsave( + &v_parent_queue_info->parent_queue_state_lock, + state_lock_flag); + + if (v_parent_queue_info->offload_state == + HIFC_QUEUE_STATE_FREE) { + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) RPort(0x%x) already released, no need to reclaim sq wqepage", + hba->port_cfg.port_id, rport_index); + spin_unlock_irqrestore( + &v_parent_queue_info->parent_queue_state_lock, + state_lock_flag); + + return RETURN_OK; + } + + cur_msn = HIFC_GET_QUEUE_CMSN(sq); + hifc_free_sq_wqe_page(sq, cur_msn); + hifc_update_sq_wqe_completion_stat(sq, v_scqe); + + spin_unlock_irqrestore( + &v_parent_queue_info->parent_queue_state_lock, + state_lock_flag); + + return RETURN_OK; + } +} + +struct hifc_parent_queue_info_s *hifc_find_parent_queue_info_by_pkg( + void *v_hba, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned int rport_index = 0; + struct hifc_parent_queue_info_s *v_parent_queue_info = NULL; + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_hba; + rport_index = v_pkg->private[PKG_PRIVATE_XCHG_RPORT_INDEX]; + + if (unlikely(rport_index >= UNF_HIFC_MAXRPORT_NUM)) { + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_MAJOR, + "[warn]Port(0x%x) send pkg sid_did(0x%x_0x%x), but uplevel allocate invalid rport index: 0x%x", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did, rport_index); + + return NULL; + } + + /* parent -->> session */ + v_parent_queue_info = + &hba->parent_queue_mgr->parent_queues[rport_index]; + + return v_parent_queue_info; +} + +struct hifc_parent_queue_info_s *hifc_find_parent_queue_info_by_id( + struct hifc_hba_s *v_hba, + unsigned int v_local_id, + unsigned int v_remote_id) +{ + unsigned int index = 0; + unsigned long flag = 0; + struct hifc_parent_queue_mgr_s *parent_queue_mgr = NULL; + struct hifc_parent_queue_info_s *v_parent_queue_info = NULL; + + parent_queue_mgr = v_hba->parent_queue_mgr; + if (!parent_queue_mgr) + return NULL; + + /* rport_number -->> parent_number -->> session_number */ + for (index = 0; index < UNF_HIFC_MAXRPORT_NUM; index++) { + spin_lock_irqsave(&parent_queue_mgr->parent_queues[index].parent_queue_state_lock, flag); + + /* local_id & remote_id & offload */ + if ((v_local_id == parent_queue_mgr->parent_queues[index].parent_sq_info.local_port_id) && + (v_remote_id == parent_queue_mgr->parent_queues[index].parent_sq_info.remote_port_id) && + (parent_queue_mgr->parent_queues[index].offload_state == + HIFC_QUEUE_STATE_OFFLOADED)) { + v_parent_queue_info = + &parent_queue_mgr->parent_queues[index]; + spin_unlock_irqrestore(&parent_queue_mgr->parent_queues[index].parent_queue_state_lock, flag); + + return v_parent_queue_info; + } + + 
spin_unlock_irqrestore(&parent_queue_mgr->parent_queues[index].parent_queue_state_lock, flag); + } + + return NULL; +} + +struct hifc_parent_queue_info_s *hifc_find_offload_parent_queue( + void *v_hba, + unsigned int v_local_id, + unsigned int v_remote_id, + unsigned int v_rport_index) +{ + unsigned int index = 0; + unsigned long flag = 0; + struct hifc_hba_s *hba = v_hba; + struct hifc_parent_queue_mgr_s *parent_queue_mgr = NULL; + struct hifc_parent_queue_info_s *v_parent_queue_info = NULL; + + parent_queue_mgr = hba->parent_queue_mgr; + if (!parent_queue_mgr) + return NULL; + + for (index = 0; index < UNF_HIFC_MAXRPORT_NUM; index++) { + if (index == v_rport_index) + continue; + + spin_lock_irqsave(&parent_queue_mgr->parent_queues[index].parent_queue_state_lock, flag); + + if ((v_local_id == parent_queue_mgr->parent_queues[index].parent_sq_info.local_port_id) && + (v_remote_id == parent_queue_mgr->parent_queues[index].parent_sq_info.remote_port_id) && + (parent_queue_mgr->parent_queues[index].offload_state != + HIFC_QUEUE_STATE_FREE) && + (parent_queue_mgr->parent_queues[index].offload_state != + HIFC_QUEUE_STATE_INITIALIZED)) { + v_parent_queue_info = + &parent_queue_mgr->parent_queues[index]; + spin_unlock_irqrestore( + &parent_queue_mgr->parent_queues[index].parent_queue_state_lock, flag); + + return v_parent_queue_info; + } + + spin_unlock_irqrestore(&parent_queue_mgr->parent_queues[index].parent_queue_state_lock, flag); + } + + return NULL; +} + +struct hifc_parent_sq_info_s *hifc_find_parent_sq_by_pkg( + void *v_hba, + struct unf_frame_pkg_s *v_pkg) +{ + struct hifc_parent_queue_info_s *v_parent_queue_info = NULL; + struct cqm_qpc_mpt_s *cqm_parent_ctx_obj = NULL; + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_hba; + + v_parent_queue_info = hifc_find_parent_queue_info_by_pkg(hba, v_pkg); + if (unlikely(!v_parent_queue_info)) { + v_parent_queue_info = hifc_find_parent_queue_info_by_id( + hba, + v_pkg->frame_head.csctl_sid & + UNF_NPORTID_MASK, + v_pkg->frame_head.rctl_did & + UNF_NPORTID_MASK); + if (!v_parent_queue_info) { + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[err]Port(0x%x) send pkg sid_did(0x%x_0x%x), get a null parent queue information", + hba->port_cfg.port_id, + v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return NULL; + } + } + + cqm_parent_ctx_obj = v_parent_queue_info->parent_ctx.cqm_parent_ctx_obj; + if (unlikely(!cqm_parent_ctx_obj)) { + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[err]Port(0x%x) send pkg sid_did(0x%x_0x%x) with this rport has not alloc parent sq information", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return NULL; + } + + return &v_parent_queue_info->parent_sq_info; +} + +struct hifc_parent_ctx_s *hifc_get_parnt_ctx_virt_addr_by_pkg( + void *v_hba, + struct unf_frame_pkg_s *v_pkg) +{ + struct hifc_parent_queue_info_s *v_parent_queue_info = NULL; + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_hba; + v_parent_queue_info = hifc_find_parent_queue_info_by_pkg(hba, v_pkg); + if (!v_parent_queue_info) { + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[err]Port(0x%x) send pkg sid_did(0x%x_0x%x), get a null parent queue information", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return NULL; + } + + if ((!v_parent_queue_info->parent_ctx.cqm_parent_ctx_obj) || + (!v_parent_queue_info->parent_ctx.virt_parent_ctx)) { + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, 
UNF_WARN, + "[err]Port(0x%x) send pkg sid_did(0x%x_0x%x), but this rport have not allocate a parent context yet", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return NULL; + } + + if (!v_parent_queue_info->parent_ctx.cqm_parent_ctx_obj->vaddr) { + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[err]Port(0x%x) send pkg sid_did(0x%x_0x%x), but cqm have not allocate a parent context yet", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return NULL; + } + + return &v_parent_queue_info->parent_ctx; +} + +unsigned int hifc_check_all_parent_queue_free(struct hifc_hba_s *v_hba) +{ + unsigned int index = 0; + unsigned long flag = 0; + struct hifc_parent_queue_mgr_s *parent_queue_mgr = NULL; + + parent_queue_mgr = v_hba->parent_queue_mgr; + if (!parent_queue_mgr) { + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[err]Port(0x%x) get a null parent queue mgr", + v_hba->port_cfg.port_id); + + return UNF_RETURN_ERROR; + } + + for (index = 0; index < UNF_HIFC_MAXRPORT_NUM; index++) { + spin_lock_irqsave( + &parent_queue_mgr->parent_queues[index].parent_queue_state_lock, + flag); + + if (parent_queue_mgr->parent_queues[index].offload_state != + HIFC_QUEUE_STATE_FREE) { + spin_unlock_irqrestore(&parent_queue_mgr->parent_queues[index].parent_queue_state_lock, flag); + return UNF_RETURN_ERROR; + } + + spin_unlock_irqrestore(&parent_queue_mgr->parent_queues[index].parent_queue_state_lock, flag); + } + + return RETURN_OK; +} + +unsigned int hifc_get_parent_ctx_xid_by_pkg(void *v_hba, + struct unf_frame_pkg_s *v_pkg) +{ + struct hifc_parent_queue_info_s *v_parent_queue_info = NULL; + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_hba; + + v_parent_queue_info = hifc_find_parent_queue_info_by_pkg(hba, v_pkg); + if (!v_parent_queue_info) { + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[err]Port(0x%x) send pkg sid_did(0x%x_0x%x), get a null parent queue information", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return INVALID_VALUE32; + } + + if ((!v_parent_queue_info->parent_ctx.cqm_parent_ctx_obj) || + (!v_parent_queue_info->parent_ctx.cqm_parent_ctx_obj->vaddr)) { + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[err]Port(0x%x) send pkg sid_did(0x%x_0x%x),but this rport have not allocate a parent context yet", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return INVALID_VALUE32; + } + + return v_parent_queue_info->parent_ctx.cqm_parent_ctx_obj->xid; +} + +static void hifc_flush_specific_scq(struct hifc_hba_s *v_hba, + unsigned int index) +{ + /* + * The software interrupt is scheduled and processed during the second + * timeout period + */ + struct hifc_scq_info_s *scq_info = NULL; + unsigned int flush_done_time = 0; + + scq_info = &v_hba->scq_info[index]; + atomic_set(&scq_info->flush_state, HIFC_QUEUE_FLUSH_DOING); + tasklet_schedule(&scq_info->tasklet); + + /* + * Wait for a maximum of 2 seconds. 
If the SCQ soft interrupt is not + * scheduled within 2 seconds, only timeout is returned + */ + while ((atomic_read(&scq_info->flush_state) != HIFC_QUEUE_FLUSH_DONE) && + (flush_done_time < HIFC_QUEUE_FLUSH_WAIT_TIMEOUT_MS)) { + msleep(HIFC_QUEUE_FLUSH_WAIT_MS); + flush_done_time += HIFC_QUEUE_FLUSH_WAIT_MS; + tasklet_schedule(&scq_info->tasklet); + } + + if (atomic_read(&scq_info->flush_state) != HIFC_QUEUE_FLUSH_DONE) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_WARN, + "[warn]Port(0x%x) special scq(0x%x) flush timeout", + v_hba->port_cfg.port_id, index); + } +} + +static void hifc_flush_cmd_scq(struct hifc_hba_s *v_hba) +{ + unsigned int index = 0; + + for (index = HIFC_CMD_SCQN_START; index < HIFC_SESSION_SCQ_NUM; + index += HIFC_SCQS_PER_SESSION) + hifc_flush_specific_scq(v_hba, index); +} + +static void hifc_flush_sts_scq(struct hifc_hba_s *v_hba) +{ + unsigned int index = 0; + + /* for each STS SCQ */ + for (index = HIFC_STS_SCQN_START; index < HIFC_SESSION_SCQ_NUM; + index += HIFC_SCQS_PER_SESSION) + hifc_flush_specific_scq(v_hba, index); +} + +static void hifc_flush_all_scq(struct hifc_hba_s *v_hba) +{ + hifc_flush_cmd_scq(v_hba); + hifc_flush_sts_scq(v_hba); + /* Flush Default SCQ */ + hifc_flush_specific_scq(v_hba, HIFC_SESSION_SCQ_NUM); +} + +static void hifc_wait_root_rq_empty(struct hifc_hba_s *v_hba) +{ + unsigned int q_index; + struct hifc_root_info_s *root_info; + struct hifc_root_rq_info_s *rq_info; + unsigned int flush_done_time = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return); + + root_info = &v_hba->root_info; + + for (q_index = 0; q_index < root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + q_index; + atomic_set(&rq_info->flush_state, HIFC_QUEUE_FLUSH_DOING); + flush_done_time = 0; + + while ((atomic_read(&rq_info->flush_state) != + HIFC_QUEUE_FLUSH_DONE) && + (flush_done_time < HIFC_QUEUE_FLUSH_WAIT_TIMEOUT_MS)) { + msleep(HIFC_QUEUE_FLUSH_WAIT_MS); + flush_done_time += HIFC_QUEUE_FLUSH_WAIT_MS; + tasklet_schedule(&rq_info->tasklet); + } + + if (atomic_read(&rq_info->flush_state) != + HIFC_QUEUE_FLUSH_DONE) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, + UNF_WARN, + "[warn]Port(0x%x) RootRq(0x%x) flush timeout", + v_hba->port_cfg.port_id, q_index); + } + } +} + +void hifc_wait_root_sq_empty(void *v_hba) +{ +#define HIFC_WAIT_ROOT_SQ_EMPTY_TIMEOUT_MS (50) + + unsigned int q_index = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_root_sq_info_s *sq_info = NULL; + struct hifc_hba_s *hba = NULL; + unsigned int start_wait_time = 0; + int time_out = UNF_FALSE; + + hba = (struct hifc_hba_s *)v_hba; + root_info = &hba->root_info; + + /* + * Traverse all root sq (just one) in the HBA and change the status to + * in_flush + */ + for (q_index = 0; q_index < root_info->sq_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + if (!sq_info) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[err]Port(0x%x) root sq(0x%x) info is NULL", + hba->port_cfg.port_id, q_index); + continue; + } + + start_wait_time = 0; + time_out = UNF_TRUE; + + /* Wait 1 second to check whether the Root Sq is empty */ + do { + if (hifc_root_sq_is_empty(sq_info)) { + time_out = UNF_FALSE; + break; + } + msleep(20); + start_wait_time++; + } while (start_wait_time < HIFC_WAIT_ROOT_SQ_EMPTY_TIMEOUT_MS); + + if (time_out) { + HIFC_HBA_STAT(hba, HIFC_STAT_SQ_WAIT_EMPTY); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + 
"[err]Port(0x%x) waiting for root sq(0x%x) empty timeout", + hba->port_cfg.port_id, q_index); + } + } +} + +void hifc_wait_all_queues_empty(struct hifc_hba_s *v_hba) +{ + hifc_wait_root_rq_empty(v_hba); + hifc_flush_all_scq(v_hba); +} + +void hifc_set_root_sq_flush_state(void *v_hba, int in_flush) +{ + unsigned int q_index = 0; + unsigned long flags = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_root_sq_info_s *sq_info = NULL; + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_hba; + root_info = &hba->root_info; + + /* + * for each root sq (so far, just one), + * set root sq state with been flushing or flush done + */ + for (q_index = 0; q_index < root_info->sq_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + if (!sq_info) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[err]Port(0x%x) root sq(0x%x) info is NULL", + hba->port_cfg.port_id, q_index); + continue; + } + + spin_lock_irqsave(&sq_info->root_sq_spin_lock, flags); + sq_info->in_flush = in_flush; + spin_unlock_irqrestore(&sq_info->root_sq_spin_lock, flags); + } +} + +void hifc_set_rport_flush_state(void *v_hba, int in_flush) +{ + unsigned int index = 0; + unsigned long flag = 0; + struct hifc_parent_queue_mgr_s *parent_queue_mgr = NULL; + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_hba; + parent_queue_mgr = hba->parent_queue_mgr; + if (!parent_queue_mgr) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) parent queue manager is empty", + hba->port_cfg.port_id); + return; + } + + /* + * for each HBA's R_Port(SQ), + * set state with been flushing or flush done + */ + for (index = 0; index < UNF_HIFC_MAXRPORT_NUM; index++) { + spin_lock_irqsave(&parent_queue_mgr->parent_queues[index].parent_sq_info.parent_sq_enqueue_lock, flag); + if (parent_queue_mgr->parent_queues[index].offload_state != + HIFC_QUEUE_STATE_FREE) { + parent_queue_mgr->parent_queues[index].parent_sq_info.port_in_flush = in_flush; + } + spin_unlock_irqrestore(&parent_queue_mgr->parent_queues[index].parent_sq_info.parent_sq_enqueue_lock, flag); + } +} + +/** + * hifc_clear_fetched_sq_wqe - Inform the chip to clear the WQE that is being + * processed by the chip. + * @v_hba : hba handle + * @Return: 0 - success, negative - failure + */ +unsigned int hifc_clear_fetched_sq_wqe(void *v_hba) +{ + unsigned int ret = UNF_RETURN_ERROR; + union hifc_cmdqe_u cmdqe; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(0x4909, UNF_TRUE, v_hba, return UNF_RETURN_ERROR); + hba = (struct hifc_hba_s *)v_hba; + + /* + * The ROOT SQ cannot control the WQE in the empty queue of the ROOT SQ. + * Therefore, the ROOT SQ does not enqueue the WQE after the hardware + * obtains the. Link down after the wait mode is used. Therefore, + * the WQE of the hardware driver needs to enter the WQE of the queue + * after the Link down of the Link down is reported. 
+ */ + hifc_wait_root_sq_empty(v_hba); + + memset(&cmdqe, 0, sizeof(union hifc_cmdqe_u)); + hifc_build_cmdqe_common(&cmdqe, HIFCOE_TASK_T_BUFFER_CLEAR, 0); + cmdqe.buffer_clear.wd1.rx_id_start = hba->exit_base; + cmdqe.buffer_clear.wd1.rx_id_end = + hba->exit_base + hba->exit_count - 1; + cmdqe.buffer_clear.scqn = hba->default_scqn; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EVENT, UNF_MAJOR, + "[info]Port(0x%x) start clear all fetched wqe in start(0x%x) - end(0x%x) scqn(0x%x) stage(0x%x)", + hba->port_cfg.port_id, + cmdqe.buffer_clear.wd1.rx_id_start, + cmdqe.buffer_clear.wd1.rx_id_end, + cmdqe.buffer_clear.scqn, + hba->q_set_stage); + + /* Send BUFFER_CLEAR command via ROOT CMDQ */ + ret = hifc_root_cmdq_enqueue(hba, &cmdqe, + sizeof(cmdqe.buffer_clear)); + + return ret; +} + +/** + * hifc_clear_pending_sq_wqe -Inform the chip to clear the Pending Sq WQE that + * is being processed by the chip. + * @v_hba: hba handle + * @Return: 0 - success, negative - failure + */ +unsigned int hifc_clear_pending_sq_wqe(void *v_hba) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int rport_index = 0; + unsigned int entry_cnt = 0; + unsigned int entry_cnt_max = 0; + unsigned int next_clr_sq = 0; + unsigned int cmdqe_len = 0; + unsigned long flag = 0; + struct hifc_parent_queue_info_s *parent_qinfo; + struct hifcoe_cmdqe_flush_sq_info_s *entry = NULL; + union hifc_cmdqe_u *cmdqe = NULL; + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_hba; + cmdqe = (union hifc_cmdqe_u *)kmalloc(HIFC_CMDQE_BUFF_LEN_MAX, + GFP_ATOMIC); + if (!cmdqe) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_EVENT, UNF_CRITICAL, + "[err]Port(0x%x) malloc flush sq information buffer cmnd failed, stage(0x%x)", + hba->port_cfg.port_id, hba->q_set_stage); + + return UNF_RETURN_ERROR; + } + + memset(cmdqe, 0, HIFC_CMDQE_BUFF_LEN_MAX); + hifc_build_cmdqe_common(cmdqe, HIFCOE_TASK_T_FLUSH_SQ, 0); + cmdqe->flush_sq.wd0.wqe_type = HIFCOE_TASK_T_FLUSH_SQ; + cmdqe->flush_sq.wd0.sq_qid = HIFC_LSW(hba->default_sq_id); + cmdqe->flush_sq.wd1.scqn = HIFC_LSW(hba->default_scqn); + cmdqe->flush_sq.wd1.port_id = hba->port_index; + + /* + * The CMDQE can contain a maximum of Clear 253 SQ information at a time + */ + entry_cnt = 0; + entry_cnt_max = (HIFC_CMDQE_BUFF_LEN_MAX - sizeof(cmdqe->flush_sq)) / + sizeof(*entry); + entry = cmdqe->flush_sq.sq_info_entry; + next_clr_sq = hba->next_clearing_sq; + + for (rport_index = next_clr_sq; rport_index < UNF_HIFC_MAXRPORT_NUM; + rport_index++) { + parent_qinfo = + &hba->parent_queue_mgr->parent_queues[rport_index]; + + spin_lock_irqsave(&parent_qinfo->parent_queue_state_lock, flag); + if (HIFC_RPORT_FLUSH_NOT_NEEDED(parent_qinfo)) { + spin_unlock_irqrestore( + &parent_qinfo->parent_queue_state_lock, flag); + next_clr_sq++; + continue; + } + entry->xid = parent_qinfo->parent_sq_info.context_id; + entry->cid = parent_qinfo->parent_sq_info.cache_id; + spin_unlock_irqrestore(&parent_qinfo->parent_queue_state_lock, + flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EVENT, UNF_MAJOR, + "[info]Port(0x%x) RPort[0x%x] flush pending SQ Entry: xid=0x%x, cid=0x%x", + hba->port_cfg.port_id, rport_index, + entry->xid, entry->cid); + + entry_cnt++; + entry++; + next_clr_sq++; + + if (entry_cnt >= entry_cnt_max) + break; + } + + if (entry_cnt == 0) { + /* If no SQ needs to be flushed, the Clear Done command is + * directly sent to the uP + */ + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EVENT, UNF_INFO, + "[info]Port(0x%x) non SQ need flush wqe, clear done directly, stage (0x%x)", + 
hba->port_cfg.port_id, hba->q_set_stage); + + /* Sends the Clear Done command to the chip */ + ret = hifc_clear_sq_wqe_done(hba); + goto free_flush_sq_cmdqe; + } + + hba->next_clearing_sq = next_clr_sq; + cmdqe->flush_sq.wd0.entry_count = entry_cnt; + + if (rport_index == UNF_HIFC_MAXRPORT_NUM) + cmdqe->flush_sq.wd1.last_wqe = 1; + else + cmdqe->flush_sq.wd1.last_wqe = 0; + + /* Clear pending Queue */ + cmdqe_len = (unsigned int)(sizeof(cmdqe->flush_sq) + + entry_cnt * sizeof(*entry)); + ret = hifc_root_cmdq_enqueue(hba, cmdqe, (unsigned short)cmdqe_len); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EVENT, UNF_MAJOR, + "[info]Port(0x%x) clear total 0x%x SQ in this CMDQE(last=%u), stage (0x%x)", + hba->port_cfg.port_id, entry_cnt, + cmdqe->flush_sq.wd1.last_wqe, hba->q_set_stage); + +free_flush_sq_cmdqe: + kfree(cmdqe); + + return ret; +} + +unsigned int hifc_wait_queue_set_flush_done(struct hifc_hba_s *v_hba) +{ + unsigned int flush_done_time = 0; + unsigned int ret = RETURN_OK; + + while ((v_hba->q_set_stage != HIFC_QUEUE_SET_STAGE_FLUSHDONE) && + (flush_done_time < HIFC_QUEUE_FLUSH_WAIT_TIMEOUT_MS)) { + msleep(HIFC_QUEUE_FLUSH_WAIT_MS); + flush_done_time += HIFC_QUEUE_FLUSH_WAIT_MS; + } + + if (v_hba->q_set_stage != HIFC_QUEUE_SET_STAGE_FLUSHDONE) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_WARN, + "[warn]Port(0x%x) queue sets flush timeout with stage(0x%x)", + v_hba->port_cfg.port_id, v_hba->q_set_stage); + + ret = UNF_RETURN_ERROR; + } + + return ret; +} + +static void hifc_disable_all_scq_schedule(struct hifc_hba_s *v_hba) +{ + struct hifc_scq_info_s *scq_info = NULL; + unsigned int index = 0; + + for (index = 0; index < HIFC_TOTAL_SCQ_NUM; index++) { + scq_info = &v_hba->scq_info[index]; + tasklet_disable(&scq_info->tasklet); + } +} + +static void hifc_disable_root_rq_schedule(struct hifc_hba_s *v_hba) +{ + unsigned int index = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_root_rq_info_s *rq_info = NULL; + + root_info = &v_hba->root_info; + + for (index = 0; index < root_info->rq_num; index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + index; + tasklet_disable(&rq_info->tasklet); + } +} + +void hifc_disable_queues_dispatch(struct hifc_hba_s *v_hba) +{ + hifc_disable_root_rq_schedule(v_hba); + hifc_disable_all_scq_schedule(v_hba); +} + +static void hifc_enable_root_rq_schedule(struct hifc_hba_s *v_hba) +{ + unsigned int index = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_root_rq_info_s *rq_info = NULL; + + root_info = &v_hba->root_info; + + for (index = 0; index < root_info->rq_num; index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + index; + tasklet_enable(&rq_info->tasklet); + } +} + +static void hifc_enable_all_scq_schedule(struct hifc_hba_s *v_hba) +{ + struct hifc_scq_info_s *scq_info = NULL; + unsigned int index = 0; + + for (index = 0; index < HIFC_TOTAL_SCQ_NUM; index++) { + scq_info = &v_hba->scq_info[index]; + tasklet_enable(&scq_info->tasklet); + } +} + +void hifc_enable_queues_dispatch(void *v_hba) +{ + hifc_enable_root_rq_schedule((struct hifc_hba_s *)v_hba); + hifc_enable_all_scq_schedule((struct hifc_hba_s *)v_hba); +} + +void hifc_clear_els_srq(struct hifc_hba_s *v_hba) +{ + unsigned int index = 0; + unsigned long flag = 0; + struct hifc_srq_info_s *srq_info = NULL; + + srq_info = &v_hba->els_srq_info; + + spin_lock_irqsave(&srq_info->srq_spin_lock, flag); + if ((srq_info->enable == UNF_FALSE) || + (srq_info->state == HIFC_CLEAN_DOING)) { + 
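+		/* SRQ already disabled or a clean is already in progress: nothing to do */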
spin_unlock_irqrestore(&srq_info->srq_spin_lock, flag); + + return; + } + srq_info->enable = UNF_FALSE; + srq_info->state = HIFC_CLEAN_DOING; + spin_unlock_irqrestore(&srq_info->srq_spin_lock, flag); + + hifc_send_clear_srq_cmd(v_hba, &v_hba->els_srq_info); + + /* wait for uCode to clear SRQ context, the timer is 30S */ + while ((srq_info->state != HIFC_CLEAN_DONE) && (index < 60)) { + msleep(500); + index++; + } + + if (srq_info->state != HIFC_CLEAN_DONE) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_WARN, + "[warn]HIFC Port(0x%x) clear els srq timeout", + v_hba->port_cfg.port_id); + } +} + +unsigned int hifc_wait_all_parent_queue_free(struct hifc_hba_s *v_hba) +{ + unsigned int index = 0; + unsigned int ret = UNF_RETURN_ERROR; + + do { + ret = hifc_check_all_parent_queue_free(v_hba); + if (ret == RETURN_OK) + break; + + index++; + msleep(20); + } while (index < 1500); + + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[warn]Port(0x%x) wait all parent queue state free timeout", + v_hba->port_cfg.port_id); + } + + return ret; +} + +void hifc_queue_pre_process(void *v_hba, int v_clean) +{ +#define HIFC_WAIT_LINKDOWN_EVENT_MS 500 + + /* From port reset & port remove */ + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba; + + /* 1. Wait for 2s and wait for QUEUE to be FLUSH Done. */ + if (hifc_wait_queue_set_flush_done(hba) != RETURN_OK) { + /* + * During the process of removing the card, if the port is + * disabled and the flush done is not available, the chip is + * powered off or the pcie link is disconnected. In this case, + * you can proceed with the next step. + */ + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]HIFC Port(0x%x) clean queue sets timeout", + hba->port_cfg.port_id); + } + + /* + * 2. Port remove: + * 2.1 free parent queue + * 2.2 clear & destroy ELS/SIRT SRQ + */ + if (v_clean == UNF_TRUE) { + if (hifc_wait_all_parent_queue_free(hba) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[warn]HIFC Port(0x%x) free all parent queue timeout", + hba->port_cfg.port_id); + } + + /* clear & than destroy ELS SRQ */ + hifc_clear_els_srq(hba); + } + + msleep(HIFC_WAIT_LINKDOWN_EVENT_MS); + + /* + * 3. The internal resources of the port chip are flush done. However, + * there may be residual scqe or rq in the queue. The scheduling is + * forcibly refreshed once. + */ + hifc_wait_all_queues_empty(hba); + + /* + * 4. 
Disable tasklet scheduling for upstream queues on the software + * layer + */ + hifc_disable_queues_dispatch(hba); +} + +unsigned int hifc_push_delay_sqe( + void *v_hba, + struct hifc_parent_queue_info_s *v_offload_parent_queue, + struct hifc_root_sqe_s *v_sqe, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned long flag = 0; + + spin_lock_irqsave(&v_offload_parent_queue->parent_queue_state_lock, + flag); + + if ((v_offload_parent_queue->offload_state != + HIFC_QUEUE_STATE_INITIALIZED) && + (v_offload_parent_queue->offload_state != HIFC_QUEUE_STATE_FREE)) { + memcpy(&v_offload_parent_queue->parent_sq_info.delay_sqe.sqe, + v_sqe, sizeof(struct hifc_root_sqe_s)); + v_offload_parent_queue->parent_sq_info.delay_sqe.start_jiff = + jiffies; + v_offload_parent_queue->parent_sq_info.delay_sqe.time_out = + v_pkg->private[PKG_PRIVATE_XCHG_TIMEER]; + v_offload_parent_queue->parent_sq_info.delay_sqe.valid = + UNF_TRUE; + v_offload_parent_queue->parent_sq_info.delay_sqe.rport_index = + v_pkg->private[PKG_PRIVATE_XCHG_RPORT_INDEX]; + v_offload_parent_queue->parent_sq_info.delay_sqe.sid = + v_pkg->frame_head.csctl_sid & UNF_NPORTID_MASK; + v_offload_parent_queue->parent_sq_info.delay_sqe.did = + v_pkg->frame_head.rctl_did & UNF_NPORTID_MASK; + + spin_unlock_irqrestore( + &v_offload_parent_queue->parent_queue_state_lock, flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) RPort(0x%x) delay send ELS, OXID(0x%x), RXID(0x%x)", + ((struct hifc_hba_s *)v_hba)->port_cfg.port_id, + v_pkg->private[PKG_PRIVATE_XCHG_RPORT_INDEX], + UNF_GET_OXID(v_pkg), UNF_GET_RXID(v_pkg)); + + return RETURN_OK; + } + + spin_unlock_irqrestore(&v_offload_parent_queue->parent_queue_state_lock, + flag); + + return UNF_RETURN_ERROR; +} + +void hifc_pop_delay_sqe(struct hifc_hba_s *v_hba, + struct hifc_delay_sqe_ctrl_info_s *v_sqe_info) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned long flag = 0; + unsigned int delay_rport_index = INVALID_VALUE32; + struct hifc_parent_queue_info_s *parent_queue = NULL; + enum hifc_parent_queue_state_e offload_state = + HIFC_QUEUE_STATE_DESTROYING; + struct hifc_destroy_ctrl_info_s destroy_sqe_info = { 0 }; + + /* + * According to the sequence, the rport index id is reported and then + * the sqe of the new link setup request is delivered. + */ + if (v_sqe_info->valid != UNF_TRUE) + return; + if (jiffies_to_msecs(jiffies - v_sqe_info->start_jiff) >= + v_sqe_info->time_out) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) pop delay root sqe failed, sqe start time 0x%llx, timeout value 0x%x", + v_hba->port_cfg.port_id, + v_sqe_info->start_jiff, + v_sqe_info->time_out); + } + + delay_rport_index = v_sqe_info->rport_index; + if (delay_rport_index >= UNF_HIFC_MAXRPORT_NUM) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) pop delay root sqe failed, rport index(0x%x) is invalid", + v_hba->port_cfg.port_id, + delay_rport_index); + + return; + } + + parent_queue = + &v_hba->parent_queue_mgr->parent_queues[delay_rport_index]; + + /* Before the root sq is delivered, check the status again to + * ensure that the initialization status is not uninstalled. Other + * states are not processed and are discarded directly. 
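+	 * Only a queue still in HIFC_QUEUE_STATE_INITIALIZED whose saved S_ID/D_ID
+	 * and parent XID still match the delayed SQE is re-enqueued (see the
+	 * check below).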
+ */ + spin_lock_irqsave(&parent_queue->parent_queue_state_lock, flag); + offload_state = parent_queue->offload_state; + + /* Before re-enqueuing the rootsq, check whether the offload status and + * connection information is consistent to prevent the old request from + * being sent after the connection status is changed. + */ + if ((offload_state == HIFC_QUEUE_STATE_INITIALIZED) && + (parent_queue->parent_sq_info.local_port_id == v_sqe_info->sid) && + (parent_queue->parent_sq_info.remote_port_id == v_sqe_info->did) && + HIFC_CHECK_XID_MATCHED( + parent_queue->parent_sq_info.context_id, + v_sqe_info->sqe.task_section.fc_dw4.parent_xid)) { + parent_queue->offload_state = HIFC_QUEUE_STATE_OFFLOADING; + spin_unlock_irqrestore(&parent_queue->parent_queue_state_lock, + flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) pop up delay sqe to root sq, sqe start time 0x%llx, timeout value 0x%x, rport index 0x%x, offload state 0x%x", + v_hba->port_cfg.port_id, + v_sqe_info->start_jiff, + v_sqe_info->time_out, + delay_rport_index, + offload_state); + + ret = hifc_root_sq_enqueue(v_hba, &v_sqe_info->sqe); + if (ret != RETURN_OK) { + spin_lock_irqsave( + &parent_queue->parent_queue_state_lock, flag); + + if (parent_queue->offload_state == + HIFC_QUEUE_STATE_OFFLOADING) + parent_queue->offload_state = offload_state; + + if (parent_queue->parent_sq_info.destroy_sqe.valid == + UNF_TRUE) { + memcpy( + &destroy_sqe_info, + &parent_queue->parent_sq_info.destroy_sqe, + sizeof(struct hifc_destroy_ctrl_info_s)); + + parent_queue->parent_sq_info.destroy_sqe.valid = + UNF_FALSE; + } + + spin_unlock_irqrestore( + &parent_queue->parent_queue_state_lock, flag); + + hifc_pop_destroy_parent_queue_sqe((void *)v_hba, + &destroy_sqe_info); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, + UNF_ERR, + "[err]Port(0x%x) pop up delay sqe to root sq fail, recover offload state 0x%x", + v_hba->port_cfg.port_id, + parent_queue->offload_state); + } + } else { + ret = UNF_RETURN_ERROR; + spin_unlock_irqrestore(&parent_queue->parent_queue_state_lock, + flag); + } + + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port 0x%x pop delay root sqe failed, sqe start time 0x%llx, timeout value 0x%x, rport index 0x%x, offload state 0x%x", + v_hba->port_cfg.port_id, + v_sqe_info->start_jiff, + v_sqe_info->time_out, + delay_rport_index, + offload_state); + } +} + +void hifc_push_destroy_parent_queue_sqe( + void *v_hba, + struct hifc_parent_queue_info_s *v_parent_qinfo, + struct unf_rport_info_s *v_rport_info) +{ + v_parent_qinfo->parent_sq_info.destroy_sqe.valid = UNF_TRUE; + v_parent_qinfo->parent_sq_info.destroy_sqe.rport_index = + v_rport_info->rport_index; + v_parent_qinfo->parent_sq_info.destroy_sqe.time_out = + HIFC_SQ_DEL_STAGE_TIMEOUT_MS; + v_parent_qinfo->parent_sq_info.destroy_sqe.start_jiff = jiffies; + + v_parent_qinfo->parent_sq_info.destroy_sqe.rport_info.nport_id = + v_rport_info->nport_id; + v_parent_qinfo->parent_sq_info.destroy_sqe.rport_info.rport_index = + v_rport_info->rport_index; + v_parent_qinfo->parent_sq_info.destroy_sqe.rport_info.port_name = + v_rport_info->port_name; +} + +void hifc_pop_destroy_parent_queue_sqe( + void *v_hba, + struct hifc_destroy_ctrl_info_s *v_destroy_sqe_info) +{ + struct hifc_hba_s *hba = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned long flag = 0; + unsigned int delay_rport_index = INVALID_VALUE32; + struct hifc_parent_queue_info_s *parent_queue = NULL; + enum 
hifc_parent_queue_state_e offload_state = + HIFC_QUEUE_STATE_DESTROYING; + + hba = (struct hifc_hba_s *)v_hba; + + if (v_destroy_sqe_info->valid != UNF_TRUE) + return; + + if (jiffies_to_msecs(jiffies - v_destroy_sqe_info->start_jiff) < + v_destroy_sqe_info->time_out) { + delay_rport_index = v_destroy_sqe_info->rport_index; + parent_queue = + &hba->parent_queue_mgr->parent_queues[delay_rport_index]; + + /* Before delivery, check the status again to ensure that the + * initialization status is not uninstalled. Other states are + * not processed and are discarded directly. + */ + spin_lock_irqsave(&parent_queue->parent_queue_state_lock, flag); + + offload_state = parent_queue->offload_state; + if ((offload_state == HIFC_QUEUE_STATE_OFFLOADED) || + (offload_state == HIFC_QUEUE_STATE_INITIALIZED)) { + spin_unlock_irqrestore( + &parent_queue->parent_queue_state_lock, flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port 0x%x pop up delay destroy parent sq, sqe start time 0x%llx, timeout value 0x%x, rport index 0x%x, offload state 0x%x", + hba->port_cfg.port_id, + v_destroy_sqe_info->start_jiff, + v_destroy_sqe_info->time_out, + delay_rport_index, + offload_state); + ret = hifc_free_parent_resource( + hba, + &v_destroy_sqe_info->rport_info); + } else { + ret = UNF_RETURN_ERROR; + spin_unlock_irqrestore( + &parent_queue->parent_queue_state_lock, flag); + } + } + + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port 0x%x pop delay destroy parent sq failed, sqe start time 0x%llx, timeout value 0x%x, rport index 0x%x, rport nport id 0x%x,offload state 0x%x", + hba->port_cfg.port_id, + v_destroy_sqe_info->start_jiff, + v_destroy_sqe_info->time_out, + delay_rport_index, + v_destroy_sqe_info->rport_info.nport_id, + offload_state); + } +} + +void hifc_free_parent_queue_info( + void *v_hba, + struct hifc_parent_queue_info_s *v_parent_queue_info) +{ + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int rport_index = INVALID_VALUE32; + struct hifc_hba_s *hba = NULL; + struct hifc_delay_sqe_ctrl_info_s sqe_info; + + memset(&sqe_info, 0, sizeof(struct hifc_delay_sqe_ctrl_info_s)); + hba = (struct hifc_hba_s *)v_hba; + + spin_lock_irqsave(&v_parent_queue_info->parent_queue_state_lock, flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) begin to free parent sq, rport_index(0x%x)", + hba->port_cfg.port_id, + v_parent_queue_info->parent_sq_info.rport_index); + + if (v_parent_queue_info->offload_state == HIFC_QUEUE_STATE_FREE) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[info]Port(0x%x) duplicate free parent sq, rport_index(0x%x)", + hba->port_cfg.port_id, + v_parent_queue_info->parent_sq_info.rport_index); + + spin_unlock_irqrestore( + &v_parent_queue_info->parent_queue_state_lock, + flag); + return; + } + + if (v_parent_queue_info->parent_sq_info.delay_sqe.valid == UNF_TRUE) { + memcpy(&sqe_info, + &v_parent_queue_info->parent_sq_info.delay_sqe, + sizeof(struct hifc_delay_sqe_ctrl_info_s)); + } + + rport_index = v_parent_queue_info->parent_sq_info.rport_index; + + /* The Parent Contexe and SQ information is released. 
After + * initialization, the Parent Contexe and SQ information is associated + * with the sq in the queue of the parent + */ + hifc_free_parent_sq(hba, v_parent_queue_info); + + /* The initialization of all queue id is invalid */ + v_parent_queue_info->parent_cmd_scq_info.cqm_queue_id = INVALID_VALUE32; + v_parent_queue_info->parent_sts_scq_info.cqm_queue_id = INVALID_VALUE32; + v_parent_queue_info->parent_els_srq_info.cqm_queue_id = INVALID_VALUE32; + v_parent_queue_info->offload_state = HIFC_QUEUE_STATE_FREE; + + spin_unlock_irqrestore(&v_parent_queue_info->parent_queue_state_lock, + flag); + + UNF_LOWLEVEL_PORT_EVENT(ret, hba->lport, UNF_PORT_RELEASE_RPORT_INDEX, + (void *)&rport_index); + hifc_pop_delay_sqe(hba, &sqe_info); + + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[warn]Port(0x%x) free parent sq with rport_index(0x%x) failed", + hba->port_cfg.port_id, rport_index); + } +} + +void hifc_build_session_rst_wqe(void *v_hba, + struct hifc_parent_sq_info_s *v_sq, + struct hifcoe_sqe_s *v_sqe, + enum hifc_session_reset_mode_e v_mode, + unsigned int scqn) +{ + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_hba; + + /* + * The reset session command does not occupy xid. Therefore, + * 0xffff can be used to align with the microcode. + */ + v_sqe->ts_sl.task_type = HIFC_SQE_SESS_RST; + v_sqe->ts_sl.local_xid = 0xffff; + v_sqe->ts_sl.wd0.conn_id = (unsigned short)(v_sq->rport_index); + v_sqe->ts_sl.wd0.remote_xid = 0xffff; + + v_sqe->ts_sl.cont.reset_session.wd0.reset_exch_start = hba->exit_base; + v_sqe->ts_sl.cont.reset_session.wd0.reset_exch_end = hba->exit_base + + (hba->exit_count - 1); + v_sqe->ts_sl.cont.reset_session.wd1.reset_did = v_sq->remote_port_id; + v_sqe->ts_sl.cont.reset_session.wd1.mode = v_mode; + v_sqe->ts_sl.cont.reset_session.wd2.reset_sid = v_sq->local_port_id; + v_sqe->ts_sl.cont.reset_session.wd3.scqn = scqn; + + hifc_build_common_wqe_ctrls(&v_sqe->ctrl_sl, + sizeof(struct hifcoe_sqe_ts_s) / + HIFC_WQE_SECTION_CHUNK_SIZE); +} + +unsigned int hifc_send_session_rst_cmd( + void *v_hba, + struct hifc_parent_queue_info_s *v_parent_queue_info, + enum hifc_session_reset_mode_e v_mode) +{ + struct hifc_parent_sq_info_s *sq = NULL; + struct hifcoe_sqe_s rst_sess_sqe; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int sts_scqn = 0; + + memset(&rst_sess_sqe, 0, sizeof(struct hifcoe_sqe_s)); + sq = &v_parent_queue_info->parent_sq_info; + sts_scqn = ((struct hifc_hba_s *)v_hba)->default_scqn; + hifc_build_session_rst_wqe(v_hba, sq, &rst_sess_sqe, v_mode, sts_scqn); + + /* Run the sq command to issue the reset session command to the + * microcode, that is, the last command. + */ + ret = hifc_parent_sq_enqueue(sq, &rst_sess_sqe); + + return ret; +} + +void hifc_rcvd_els_from_srq_time_out(struct work_struct *work) +{ + struct hifc_hba_s *hba = NULL; + + hba = container_of(work, struct hifc_hba_s, delay_info.del_work.work); + + /* + * If the frame is not processed, the frame is pushed to the CM layer: + * The frame may have been processed when the root rq receives data. 
+ */ + if (hba->delay_info.srq_delay_flag) { + hifc_rcv_els_cmnd( + hba, &hba->delay_info.pkg, + hba->delay_info.pkg.unf_cmnd_pload_bl.buffer_ptr, + 0, UNF_FALSE); + hba->delay_info.srq_delay_flag = 0; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) srq delay work timeout, send saved plgoi to CM", + hba->port_cfg.port_id); + } +} + +unsigned int hifc_rport_session_rst(void *v_hba, + struct unf_rport_info_s *v_rport_info) +{ + /* NOT USE NOW */ + struct hifc_hba_s *hba = NULL; + struct hifc_parent_queue_info_s *v_parent_queue_info = NULL; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_rport_info, return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)v_hba; + if (!hba->parent_queue_mgr) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port 0x%x cannot find parent queue pool", + hba->port_cfg.port_id); + + return UNF_RETURN_ERROR; + } + + if (v_rport_info->rport_index >= UNF_HIFC_MAXRPORT_NUM) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port 0x%x free parent resource failed, invlaid rport index %u,Rport NPortId 0x%x", + hba->port_cfg.port_id, + v_rport_info->rport_index, + v_rport_info->nport_id); + + return UNF_RETURN_ERROR; + } + + v_parent_queue_info = + &hba->parent_queue_mgr->parent_queues[v_rport_info->rport_index]; + + spin_lock_irqsave(&v_parent_queue_info->parent_queue_state_lock, flag); + + if (v_parent_queue_info->offload_state == HIFC_QUEUE_STATE_OFFLOADED) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port 0x%x parent sq reset session, rport index 0x%x:0x%x,local nportid 0x%x,remote nportid 0x%x:0x%x,ctx id 0x%x, cid 0x%x", + hba->port_cfg.port_id, + v_rport_info->rport_index, + v_parent_queue_info->parent_sq_info.rport_index, + v_parent_queue_info->parent_sq_info.local_port_id, + v_rport_info->nport_id, + v_parent_queue_info->parent_sq_info.remote_port_id, + v_parent_queue_info->parent_sq_info.context_id, + v_parent_queue_info->parent_sq_info.cache_id); + + /* this scenario does not exist */ + (void)queue_delayed_work( + hba->work_queue, + &v_parent_queue_info->parent_sq_info.del_work, + (unsigned long) + msecs_to_jiffies((unsigned int) + HIFC_SQ_DEL_STAGE_TIMEOUT_MS)); + + spin_unlock_irqrestore( + &v_parent_queue_info->parent_queue_state_lock, flag); + + /* + * The current session reset is in clear I/O mode, and the + * connection resources are not deleted + */ + ret = hifc_send_session_rst_cmd(hba, + v_parent_queue_info, + HIFC_SESS_RST_DELETE_IO_ONLY); + } else { + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port 0x%x parent sq is not offloaded, no need reset session , rport index 0x%x:0x%x,local nportid 0x%x,remote nportid 0x%x:0x%x", + hba->port_cfg.port_id, + v_rport_info->rport_index, + v_parent_queue_info->parent_sq_info.rport_index, + v_parent_queue_info->parent_sq_info.local_port_id, + v_rport_info->nport_id, + v_parent_queue_info->parent_sq_info.remote_port_id); + + spin_unlock_irqrestore( + &v_parent_queue_info->parent_queue_state_lock, flag); + + ret = RETURN_OK; + } + + return ret; +} + +/** + * hifc_flush_ini_resp_queue - Pay attention to the processing that is being + * processed, but do not pay attention to the subsequent + * processing. This is the main difference between the + * HIFC_FlushScq and the HIFC_FlushScq. 
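+ * In practice it simply triggers one flush pass over the STS SCQs via
+ * hifc_flush_sts_scq() and returns RETURN_OK.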
+ * @v_hba: hba handle + * @Return: 0 - success, negative - failure + */ +unsigned int hifc_flush_ini_resp_queue(void *v_hba) +{ + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, + return UNF_RETURN_ERROR); + hba = (struct hifc_hba_s *)v_hba; + + /* + * Although this function is called, the original HIFC_FlushScq is based + * on the scenario where the port is disabled. That is, the function is + * executed and the SCQ is empty. However, because the port is not + * disabled in the current scenario, it can only indicate that a batch + * of processing is completed. + */ + hifc_flush_sts_scq(hba); + + return RETURN_OK; +} + +/* + * Function Name : hifc_handle_aeq_queue_error + * Function Description: Process the queue error event sent by the chip + * through AEQ. + * Input Parameters : *v_hba, + * : *v_aeq_msg + * Output Parameters : N/A + * Return Type : void + */ +static void hifc_handle_aeq_queue_error(struct hifc_hba_s *v_hba, + struct hifcoe_aqe_data_s *v_aeq_msg) +{ + unsigned int sts_scqn_local = 0; + unsigned int full_ci = INVALID_VALUE32; + unsigned int full_ci_owner = INVALID_VALUE32; + struct hifc_scq_info_s *scq_info = NULL; + struct hifcoe_aqe_data_s *aeq_msg = NULL; + + aeq_msg = v_aeq_msg; + + sts_scqn_local = HIFC_RPORTID_TO_STS_SCQN(aeq_msg->wd0.conn_id); + scq_info = &v_hba->scq_info[sts_scqn_local]; + full_ci = scq_info->ci; + full_ci_owner = scq_info->ci_owner; + + /* + * Currently, Flush is forcibly set to StsScq. No matter whether scq is + * processed, AEQE is returned + */ + tasklet_schedule(&scq_info->tasklet); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) RPort(0x%x) LocalScqn(0x%x) CqmScqn(0x%x) is full, force flush CI from (%d|0x%x) to (%d|0x%x)", + v_hba->port_cfg.port_id, aeq_msg->wd0.conn_id, + sts_scqn_local, scq_info->scqn, + full_ci_owner, full_ci, scq_info->ci_owner, scq_info->ci); +} + +void hifc_process_aeqe(void *v_srv_handle, + unsigned char event_type, + u64 event_val) +{ + unsigned int ret = RETURN_OK; + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_srv_handle; + struct hifcoe_aqe_data_s aeq_msg; + unsigned long long aeq_info = 0; + unsigned char event_code = INVALID_VALUE8; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, hba, return); + + aeq_info = cpu_to_be64(event_val); + memcpy(&aeq_msg, (struct hifcoe_aqe_data_s *)&aeq_info, + sizeof(struct hifcoe_aqe_data_s)); + hifc_big_to_cpu32(&aeq_msg, sizeof(struct hifcoe_aqe_data_s)); + event_code = (unsigned char)aeq_msg.wd0.evt_code; + + switch (event_type) { + case FC_AEQ_EVENT_QUEUE_ERROR: + hifc_handle_aeq_queue_error(hba, &aeq_msg); + break; + + case FC_AEQ_EVENT_WQE_FATAL_ERROR: + UNF_LOWLEVEL_PORT_EVENT(ret, + hba->lport, + UNF_PORT_ABNORMAL_RESET, + NULL); + break; + + case FC_AEQ_EVENT_CTX_FATAL_ERROR: + break; + + case FC_AEQ_EVENT_OFFLOAD_ERROR: + ret = hifc_handle_aeq_offload_err(hba, &aeq_msg); + break; + + default: + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[warn]Port(0x%x) receive a unsupported AEQ EventType(0x%x) EventVal(0x%llx).", + hba->port_cfg.port_id, event_type, + (unsigned long long)event_val); + return; + } + + if (event_code < FC_AEQ_EVT_ERR_CODE_BUTT) + HIFC_AEQ_ERR_TYPE_STAT(hba, aeq_msg.wd0.evt_code); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "[info]Port(0x%x) receive AEQ EventType(0x%x) EventVal(0x%llx) EvtCode(0x%x) Conn_id(0x%x) Xid(0x%x) %s", + hba->port_cfg.port_id, event_type, + (unsigned long long)event_val, event_code, + aeq_msg.wd0.conn_id, 
aeq_msg.wd1.xid, + (ret == UNF_RETURN_ERROR) ? "ERROR" : "OK"); +} + diff --git a/drivers/scsi/huawei/hifc/hifc_queue.h b/drivers/scsi/huawei/hifc/hifc_queue.h new file mode 100644 index 000000000000..cc3e753be7a6 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_queue.h @@ -0,0 +1,1363 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef __HIFC_QUEUE_H__ +#define __HIFC_QUEUE_H__ + +#include "hifc_wqe.h" +#include "hifc_hw.h" +#include "hifc_hwif.h" +#include "hifc_cqm_main.h" + +#define WQE_MARKER_0 0x0 +#define WQE_MARKER_6B 0x6b + +#define HIFC_SQE_SIZE 128 +#define HIFC_MIN_WP_NUM 2 + +/* Counter */ +#define HIFC_STAT_SESSION_IO + +/*************** PARENT SQ&Context defines *******************************/ +#define HIFC_MAX_MSN (65535) +#define HIFC_MSN_MASK (0xffff000000000000LL) +#define HIFC_SQE_TS_SIZE (72) +#define HIFC_SQE_FIRST_OBIT_DW_POS (0) +#define HIFC_SQE_SECOND_OBIT_DW_POS (30) +#define HIFC_SQE_OBIT_SET_MASK_BE (0x80) +#define HIFC_SQE_OBIT_CLEAR_MASK_BE (0xffffff7f) +#define HIFC_MAX_SQ_TASK_TYPE_CNT (128) + +/* + * Note: if the location of flush done bit changes, the definition must be + * modifyed again + */ +#define HIFC_CTXT_FLUSH_DONE_DW_POS (58) +#define HIFC_CTXT_FLUSH_DONE_MASK_BE (0x4000) + +#define HIFC_GET_SQ_HEAD(v_sq) \ + list_entry((&(v_sq)->list_linked_list_sq)->next,\ + struct hifc_sq_wqe_page_s, entry_wpg) +#define HIFC_GET_SQ_TAIL(v_sq) \ + list_entry((&(v_sq)->list_linked_list_sq)->prev, \ + struct hifc_sq_wqe_page_s, entry_wpg) +#ifdef HIFC_STAT_SESSION_IO +#define HIFC_SQ_IO_STAT(v_sq, io_type) \ + (atomic_inc(&(v_sq)->io_stat[io_type])) +#define HIFC_SQ_IO_STAT_READ(v_sq, io_type) \ + (atomic_read(&(v_sq)->io_stat[io_type])) +#endif +#define HIFC_GET_QUEUE_CMSN(v_sq)\ + ((unsigned int)(be64_to_cpu(((((v_sq)->queue_header)->ci_record) \ + & HIFC_MSN_MASK)))) +#define HIFC_GET_WP_END_CMSN(head_start_cmsn, wqe_num_per_buf) \ + (unsigned short)(((unsigned int)(head_start_cmsn) +\ + (unsigned int)(wqe_num_per_buf) - 1) % (HIFC_MAX_MSN + 1)) +#define HIFC_MSN_INC(msn) (((HIFC_MAX_MSN) == (msn)) ? 0 : ((msn) + 1)) +#define HIFC_MSN_DEC(msn) ((0 == (msn)) ? 
(HIFC_MAX_MSN) : ((msn) - 1)) +#define HIFC_QUEUE_MSN_OFFSET(start_cmsn, end_cmsn) \ + (unsigned int)((((unsigned int)(end_cmsn) + (HIFC_MAX_MSN)) - \ + (unsigned int)(start_cmsn)) % (HIFC_MAX_MSN + 1)) + +/******************* ROOT SQ&RQ defines ***********************************/ +#define HIFC_ROOT_Q_CTX_SIZE (48) +#define HIFC_ROOT_Q_CTX_CI_WQE_HI_SHIFT (44) +#define HIFC_ROOT_Q_CTX_CI_WQE_LOW_SHIFT (12) +#define HIFC_ROOT_Q_CTX_CLA_HI_SHIFT (41) +#define HIFC_ROOT_Q_CTX_CLA_LOW_SHIFT (9) +#define HIFC_ROOT_TSO_LRO_SPACE (0) +#define HIFC_ROOT_CTX_WQE_PREFETCH_MAX (3) +#define HIFC_ROOT_CTX_WQE_PREFETCH_MIN (1) +#define HIFC_ROOT_CTX_WQE_PRERETCH_THRESHOLD (2) +#define HIFC_CI_WQE_PAGE_HIGH_ADDR(x) \ + (unsigned int)(((x) >> HIFC_ROOT_Q_CTX_CI_WQE_HI_SHIFT) & 0xffffffff) +#define HIFC_CI_WQE_PAGE_LOW_ADDR(x) \ + (unsigned int)(((x) >> HIFC_ROOT_Q_CTX_CI_WQE_LOW_SHIFT) & 0xffffffff) +#define HIFC_CLA_HIGH_ADDR(x)\ + (unsigned int)(((x) >> HIFC_ROOT_Q_CTX_CLA_HI_SHIFT) & 0xffffffff) +#define HIFC_CLA_LOW_ADDR(x) \ + (unsigned int)(((x) >> HIFC_ROOT_Q_CTX_CLA_LOW_SHIFT) & 0xffffffff) + +/*********************** ROOT SQ defines ***********************************/ +#define HIFC_ROOT_SQ_NUM (1) +#define HIFC_ROOT_SQ_DEPTH (2048) +#define HIFC_ROOT_SQ_WQEBB (64) +#define HIFC_ROOT_SQ_CI_TABLE_STEP_BYTE (4) +#define HIFC_ROOT_SQ_LOOP_OWNER (1) +#define HIFC_ROOT_SQ_CI_ATTRIBUTE_ADDRESS_SHIFT (2) +#define HIFC_DOORBELL_SQ_TYPE (1) +#define HIFC_DOORBELL_SQ_PI_HIGH_BITS_SHIFT (8) +#define HIFC_DOORBELL_SQ_PI_LOW_BITS_MASK (0xFF) +#define HIFC_INT_NUM_PER_QUEUE (1) +#define HIFC_INT_ENABLE (1) +#define HIFC_ROOT_CFG_SQ_NUM_MAX (42) +#define HIFC_CMDQ_QUEUE_TYPE_SQ (0) +#define HIFC_GET_ROOT_SQ_CI_ADDR(addr, index) \ + ((addr) + (unsigned int)((index) * HIFC_ROOT_SQ_CI_TABLE_STEP_BYTE)) +#define HIFC_ROOT_SQ_CTX_OFFSET(q_num, q_id) \ + ((HIFC_ROOT_TSO_LRO_SPACE * 2 * (q_num) +\ + HIFC_ROOT_Q_CTX_SIZE * (q_id)) / 16) + +/********************** ROOT RQ defines ***********************************/ +#define HIFC_ROOT_RQ_NUM (1) +#define HIFC_ROOT_RQ_DEPTH (1024) +#define HIFC_ROOT_RQ_WQEBB (32) +#define HIFC_ROOT_RQ_PI_TABLE_STEP_BYTE (4) +#define HIFC_ROOT_RQ_LOOP_OWNER (1) +#define HIFC_ROOT_RQ_RECV_BUFF_SIZE (1024) +#define HIFC_ROOT_Q_INT_ID_MAX (1024) /* 10bit */ +#define HIFC_ROOT_CFG_RQ_NUM_MAX (42) +#define HIFC_CMDQ_QUEUE_TYPE_RQ (1) +#define HIFC_RQE_MAX_PROCESS_NUM_PER_INTR (128) +#define HIFC_ROOT_RQ_CTX_OFFSET(q_num, q_id)\ + (((HIFC_ROOT_TSO_LRO_SPACE * 2 + HIFC_ROOT_Q_CTX_SIZE) * (q_num) +\ + HIFC_ROOT_Q_CTX_SIZE * (q_id)) / 16) + +/************************** SCQ defines ***********************************/ +#define HIFC_SCQ_INT_ID_MAX (2048) /* 11BIT */ +#define HIFC_SCQE_SIZE (64) +#define HIFC_CQE_GPA_SHIFT (4) +#define HIFC_NEXT_CQE_GPA_SHIFT (12) +/* 1-Update Ci by Tile, 0-Update Ci by Hardware */ +#define HIFC_PMSN_CI_TYPE_FROM_HOST (0) +#define HIFC_PMSN_CI_TYPE_FROM_UCODE (1) +#define HIFC_ARMQ_IDLE (0) +#define HIFC_CQ_INT_MODE (2) +#define HIFC_CQ_HEADER_OWNER_SHIFT (15) + +/* + * SCQC_CQ_DEPTH: 0-256, 1-512, 2-1k, 3-2k, 4-4k, 5-8k, 6-16k, 7-32k. 
+ * include LinkWqe + */ +#define HIFC_CMD_SCQ_DEPTH (4096) +#define HIFC_STS_SCQ_DEPTH (8192) + +#define HIFC_CMD_SCQC_CQ_DEPTH (hifc_log2n(HIFC_CMD_SCQ_DEPTH >> 8)) +#define HIFC_STS_SCQC_CQ_DEPTH (hifc_log2n(HIFC_STS_SCQ_DEPTH >> 8)) +#define HIFC_STS_SCQ_CI_TYPE HIFC_PMSN_CI_TYPE_FROM_HOST + +#define HIFC_CMD_SCQ_CI_TYPE HIFC_PMSN_CI_TYPE_FROM_UCODE +#define HIFC_SCQ_INTR_LOW_LATENCY_MODE 0 +#define HIFC_SCQ_INTR_POLLING_MODE 1 + +#define HIFC_CQE_MAX_PROCESS_NUM_PER_INTR (128) +#define HIFC_SESSION_SCQ_NUM (16) + +/* + * SCQ[0, 2, 4 ...]CMD SCQ,SCQ[1, 3, 5 ...]STS SCQ,SCQ[HIFC_TOTAL_SCQ_NUM-1] + * Defaul SCQ + */ +#define HIFC_CMD_SCQN_START (0) +#define HIFC_STS_SCQN_START (1) +#define HIFC_SCQS_PER_SESSION (2) + +#define HIFC_TOTAL_SCQ_NUM (HIFC_SESSION_SCQ_NUM + 1) + +#define HIFC_SCQ_IS_STS(scq_index) \ + (((scq_index) % HIFC_SCQS_PER_SESSION) || \ + ((scq_index) == HIFC_SESSION_SCQ_NUM)) +#define HIFC_SCQ_IS_CMD(scq_index)\ + (!HIFC_SCQ_IS_STS(scq_index)) +#define HIFC_RPORTID_TO_CMD_SCQN(rport_index) \ + (((rport_index) * HIFC_SCQS_PER_SESSION) % HIFC_SESSION_SCQ_NUM) +#define HIFC_RPORTID_TO_STS_SCQN(rport_index) \ + ((((rport_index) * HIFC_SCQS_PER_SESSION) + 1) % HIFC_SESSION_SCQ_NUM) + +/************************** SRQ defines ***********************************/ +#define HIFC_SRQE_SIZE (32) +#define HIFC_SRQ_INIT_LOOP_O (1) +#define HIFC_QUEUE_RING (1) +#define HIFC_SRQ_ELS_DATA_NUM (1) +#define HIFC_SRQ_ELS_SGE_LEN (256) +#define HIFC_SRQ_ELS_DATA_DEPTH (4096) + +#define HIFC_IRQ_NAME_MAX (30) + +/* Support 2048 sessions(xid) */ +#define HIFC_CQM_XID_MASK (0x7ff) + +#define HIFC_QUEUE_FLUSH_DOING (0) +#define HIFC_QUEUE_FLUSH_DONE (1) +#define HIFC_QUEUE_FLUSH_WAIT_TIMEOUT_MS (2000) +#define HIFC_QUEUE_FLUSH_WAIT_MS (2) + +/************************* RPort defines ***********************************/ +#define HIFC_EXIT_STRIDE (4096) +#define UNF_HIFC_MAXRPORT_NUM (2048) +#define HIFC_RPORT_OFFLOADED(prnt_qinfo) \ + ((prnt_qinfo)->offload_state == HIFC_QUEUE_STATE_OFFLOADED) +#define HIFC_RPORT_NOT_OFFLOADED(prnt_qinfo) \ + ((prnt_qinfo)->offload_state != HIFC_QUEUE_STATE_OFFLOADED) +#define HIFC_RPORT_FLUSH_NOT_NEEDED(prnt_qinfo)\ + (((prnt_qinfo)->offload_state == HIFC_QUEUE_STATE_INITIALIZED) || \ + ((prnt_qinfo)->offload_state == HIFC_QUEUE_STATE_OFFLOADING) || \ + ((prnt_qinfo)->offload_state == HIFC_QUEUE_STATE_FREE)) +#define HIFC_CHECK_XID_MATCHED(sq_xid, sqe_xid) \ + (((sq_xid) & HIFC_CQM_XID_MASK) == ((sqe_xid) & HIFC_CQM_XID_MASK)) +#define HIFC_PORT_MODE_TGT (0) /* Port mode */ +#define HIFC_PORT_MODE_INI (1) +#define HIFC_PORT_MODE_BOTH (2) + +/********** Hardware Reserved Queue Info defines ***************************/ +#define HIFC_HRQI_SEQ_ID_MAX (255) +#define HIFC_HRQI_SEQ_INDEX_MAX (64) +#define HIFC_HRQI_SEQ_INDEX_SHIFT (6) +#define HIFC_HRQI_SEQ_SEPCIAL_ID (3) +#define HIFC_HRQI_SEQ_INVALID_ID (~0LL) + +/************************* OQID defines ***********************************/ + +#define HIFC_OQID_HOST_XID_OFFSET (5) +#define HIFC_OQID_HOST_RW_OFFSET (4) +#define HIFC_OQID_HOST_ST_OFFSET (2) +#define HIFC_OQID_HOST_OQID_LEN (11) +#define HIFC_OQID_HOST_READ_FROM_HOST (0UL) +#define HIFC_OQID_HOST_WRITE_TO_HOST (1) +#define HIFC_CPI_CHNL_ID_XOE_READ (1UL) +#define HIFC_CPI_CHNL_ID_XOE_WRITE (3UL) +#define HIFC_SERVICE_TYPE_FC_FCOE (2) +/********************* sdk config defines ***********************************/ +#define HIFC_CNTX_SIZE_256B 256 +#define HIFC_QUEUE_LINK_STYLE 0 +#define HIFC_PACKET_COS_FC_CMD 0 +#define HIFC_PACKET_COS_FC_DATA 1 +#define 
HIFC_DB_ARM_DISABLE 0 +#define HIFC_DMA_ATTR_OFST 0 +#define HIFC_PCIE_TEMPLATE 0 +#define HIFC_PCIE_RELAXED_ORDERING 1 +#define HIFC_OWNER_DRIVER_PRODUCT 1 +#define HIFC_CMDQE_BUFF_LEN_MAX 2040 +#define HIFC_CNTX_SIZE_T_256B 0 + +#define HIFC_OQID_IO_HOST_SET(xid, rw, cidx, vf_id, m, oqid) \ + { \ + oqid = (unsigned short)(((unsigned short)\ + ((xid) << HIFC_OQID_HOST_XID_OFFSET)) \ + | ((unsigned short)((rw) << HIFC_OQID_HOST_RW_OFFSET)) \ + | ((unsigned short)(HIFC_SERVICE_TYPE_FC_FCOE << \ + HIFC_OQID_HOST_ST_OFFSET)) | (cidx)); \ + oqid = (unsigned short)\ + (((unsigned short)(oqid & (0x7ff >> (m))))\ + | ((unsigned short)((vf_id) << \ + (HIFC_OQID_HOST_OQID_LEN - (m))))); \ + } + +#define HIFC_OQID_RD(xid, vf_id, m, oq_id) \ + HIFC_OQID_IO_HOST_SET(xid, HIFC_OQID_HOST_READ_FROM_HOST,\ + HIFC_CPI_CHNL_ID_XOE_READ, vf_id, m, oq_id) + +#define HIFC_OQID_WR(xid, vf_id, m, oq_id) \ + HIFC_OQID_IO_HOST_SET(xid, HIFC_OQID_HOST_WRITE_TO_HOST,\ + HIFC_CPI_CHNL_ID_XOE_WRITE, vf_id, m, oq_id) + +enum hifc_session_reset_mode_e { + HIFC_SESS_RST_DELETE_IO_ONLY = 1, + HIFC_SESS_RST_DELETE_CONN_ONLY = 2, + HIFC_SESS_RST_DELETE_IO_CONN_BOTH = 3, + HIFC_SESS_RST_MODE_BUTT +}; + +/* linkwqe */ +#define CQM_LINK_WQE_CTRLSL_VALUE 2 +#define CQM_LINK_WQE_LP_VALID 1 +#define CQM_LINK_WQE_LP_INVALID 0 + +/****************** ROOT SQ&RQ&CTX defines ****************************/ +struct nic_tx_doorbell { + union { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 srv_type : 5; + u32 cos : 3; + u32 c_flag : 1; + u32 rsvd0 : 5; + u32 queue_id : 10; + u32 pi_high : 8; +#else + u32 pi_high : 8; + u32 queue_id : 10; + u32 rsvd0 : 5; + u32 c_flag : 1; + u32 cos : 3; + u32 srv_type : 5; +#endif + } bs0; + u32 dw0; + }; + + u32 rsvd1; +}; + +struct hifc_qp_ctxt_header { + u16 num_queues; + u16 queue_type; + u32 addr_offset; +}; + +/* + * nic_sq_ctx_1822 table define + */ +struct hifc_sq_ctxt { + union { + struct sq_ctx_dw0 { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* whether generate CEQ */ + u32 ceq_arm : 1; + u32 rsvd1 : 7; + /* whether enable CEQ */ + u32 ceq_en : 1; + u32 global_sq_id : 10; + u32 ceq_num : 5; + u32 pkt_template : 6; + u32 rsvd2 : 2; +#else + u32 rsvd2 : 2; + u32 pkt_template : 6; + u32 ceq_num : 5; + u32 global_sq_id : 10; + /* whether enable CEQ */ + u32 ceq_en : 1; + u32 rsvd1 : 7; + /* whether generate CEQ */ + u32 ceq_arm : 1; +#endif + } sq_ctx_dw0; + u32 dw0; + }; + + union { + struct sq_ctx_dw1 { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 wqe_template : 6; + u32 rsvd3 : 2; + u32 owner : 1; + /* customer index */ + u32 ci : 12; + u32 tso_doing : 1; + /* indicate how many sge left in current tso wqe */ + u32 sge_num_left : 6; + /* number of sge processing */ + u32 processing_sge : 3; + u32 rsvd4 : 1; +#else + u32 rsvd4 : 1; + /* number of sge processing */ + u32 processing_sge : 3; + /* indicate how many sge left in current tso wqe */ + u32 sge_num_left : 6; + u32 tso_doing : 1; + /* customer index */ + u32 ci : 12; + u32 owner : 1; + u32 rsvd3 : 2; + u32 wqe_template : 6; +#endif + } sq_ctx_dw1; + u32 dw1; + }; + + union { + struct sq_ctx_dw2 { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd5 : 12; + /* the wqe page address that current ci point to */ + u32 ci_wqe_page_addr_hi : 20; +#else + /* the wqe page address that current ci point to */ + u32 ci_wqe_page_addr_hi : 20; + u32 rsvd5 : 12; +#endif + } sq_ctx_dw2; + u32 dw2; + }; + + u32 ci_wqe_page_addr_lo; + + union { + struct sq_ctx_dw4 { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* + * The minimum prefetch WQE cacheline 
number of this SQ + */ + u32 prefetch_min : 7; + /* + * The maximum prefetch WQE cacheline number of this SQ + */ + u32 prefetch_max : 11; + u32 prefetch_cache_threshold : 14; +#else + u32 prefetch_cache_threshold : 14; + /* + * The maximum prefetch WQE cacheline number of this SQ + */ + u32 prefetch_max : 11; + /* + * The minimum prefetch WQE cacheline number of this SQ + */ + u32 prefetch_min : 7; +#endif + } sq_ctx_dw4; + u32 dw4; + }; + + union { + struct sq_ctx_dw5 { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd6 : 31; + u32 prefetch_owner : 1; +#else + u32 prefetch_owner : 1; + u32 rsvd6 : 31; +#endif + } sq_ctx_dw5; + u32 dw5; + }; + + union { + struct sq_ctx_dw6 { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 prefetch_ci : 12; + u32 prefetch_ci_wqe_addr_hi : 20; +#else + u32 prefetch_ci_wqe_addr_hi : 20; + u32 prefetch_ci : 12; +#endif + } sq_ctx_dw6; + u32 dw6; + }; + + u32 prefetch_ci_wqe_addr_lo; + + union { + struct sq_ctx_dw8 { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* processed length of current seg */ + u32 processed_seg_len : 16; + u32 rsvd7 : 16; +#else + u32 rsvd7 : 16; + /* processed length of current seg */ + u32 processed_seg_len : 16; +#endif + } sq_ctx_dw8; + u32 dw8; + }; + + u32 qsf; + + union { + struct sq_ctx_dw10 { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd8 : 9; + /* CI CLA table address */ + u32 cla_addr_hi : 23; +#else + /* CI CLA table address */ + u32 cla_addr_hi : 23; + u32 rsvd8 : 9; +#endif + } sq_ctx_dw10; + u32 dw10; + }; + + u32 cla_addr_lo; +}; + +struct hifc_sq_ctxt_block { + struct hifc_qp_ctxt_header cmdq_hdr; + struct hifc_sq_ctxt sq_ctx[HIFC_ROOT_CFG_SQ_NUM_MAX]; +}; + +/* + * nic_rq_ctx_1822 table define + */ +struct hifc_rq_ctxt { + union { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 max_count : 10; + u32 cqe_tmpl : 6; + u32 pkt_tmpl : 6; + u32 wqe_tmpl : 6; + u32 psge_valid : 1; + u32 rsvd1 : 1; + u32 owner : 1; + u32 ceq_en : 1; +#else + u32 ceq_en : 1; + u32 owner : 1; + u32 rsvd1 : 1; + u32 psge_valid : 1; + u32 wqe_tmpl : 6; + u32 pkt_tmpl : 6; + u32 cqe_tmpl : 6; + u32 max_count : 10; +#endif + } bs; + u32 dw0; + }; + + union { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* + * Interrupt number that L2NIC engine tell SW if + * generate int instead of CEQ + */ + u32 int_num : 10; + u32 ceq_count : 10; + /* product index */ + u32 pi : 12; +#else + /* product index */ + u32 pi : 12; + u32 ceq_count : 10; + /* + * Interrupt number that L2NIC engine tell SW if + * generate int instead of CEQ + */ + u32 int_num : 10; +#endif + } bs0; + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* + * CEQ arm, L2NIC engine will clear it after send ceq, + * driver should set it by CMD Q after receive all pkt. + */ + u32 ceq_arm : 1; + u32 eq_id : 5; + u32 rsvd2 : 4; + u32 ceq_count : 10; + /* product index */ + u32 pi : 12; +#else + /* product index */ + u32 pi : 12; + u32 ceq_count : 10; + u32 rsvd2 : 4; + u32 eq_id : 5; + /* CEQ arm, L2NIC engine will clear it after send ceq, + * driver should set it by CMD Q after receive all pkt. 
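+			 * (bs0 above and this bs1 layout overlay the same
+			 * 32-bit word, dw1.)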
+ */ + u32 ceq_arm : 1; +#endif + } bs1; + u32 dw1; + }; + + union { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* consumer index */ + u32 ci : 12; + /* WQE page address of current CI point to, high part */ + u32 ci_wqe_page_addr_hi : 20; +#else + /* WQE page address of current CI point to, high part */ + u32 ci_wqe_page_addr_hi : 20; + /* consumer index */ + u32 ci : 12; +#endif + } bs2; + u32 dw2; + }; + + /* WQE page address of current CI point to, low part */ + u32 ci_wqe_page_addr_lo; + + union { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 prefetch_min : 7; + u32 prefetch_max : 11; + u32 prefetch_cache_threshold : 14; +#else + u32 prefetch_cache_threshold : 14; + u32 prefetch_max : 11; + u32 prefetch_min : 7; +#endif + } bs3; + u32 dw3; + }; + + union { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd3 : 31; + /* ownership of WQE */ + u32 prefetch_owner : 1; +#else + /* ownership of WQE */ + u32 prefetch_owner : 1; + u32 rsvd3 : 31; +#endif + } bs4; + u32 dw4; + }; + + union { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 prefetch_ci : 12; + /* high part */ + u32 prefetch_ci_wqe_page_addr_hi : 20; +#else + /* high part */ + u32 prefetch_ci_wqe_page_addr_hi : 20; + u32 prefetch_ci : 12; +#endif + } bs5; + u32 dw5; + }; + + /* low part */ + u32 prefetch_ci_wqe_page_addr_lo; + /* host mem GPA, high part */ + u32 pi_gpa_hi; + /* host mem GPA, low part */ + u32 pi_gpa_lo; + + union { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd4 : 9; + u32 ci_cla_tbl_addr_hi : 23; +#else + u32 ci_cla_tbl_addr_hi : 23; + u32 rsvd4 : 9; +#endif + } bs6; + u32 dw6; + }; + + u32 ci_cla_tbl_addr_lo; +}; + +struct hifc_rq_ctxt_block { + struct hifc_qp_ctxt_header cmdq_hdr; + struct hifc_rq_ctxt rq_ctx[HIFC_ROOT_CFG_RQ_NUM_MAX]; +}; + +struct hifc_root_qsf_s { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* packet priority, engine pass pri to ucode */ + u32 pri : 3; + /* unicast flag, engine pass uc to ucode */ + u32 uc : 1; + /* sctp packet, engine pass sctp to ucode */ + u32 sctp : 1; + /* mss */ + u32 mss : 14; + /* when set, hi1822 calculates the tcp/udp check sum of the packet */ + u32 tcp_udp_cs : 1; + /* + * transmit segmentation offload is activated when the tso flag is set + */ + u32 tso : 1; + /* for udp packet, engine read the whole udp packet from host by 1 dma + * read, and ipsu calculate udp checksum, ucode do ip segment + */ + u32 ufo : 1; + /* payload offset. it is the start position to calculate tcp/udp + * checksum or sctp crc + */ + u32 payload_offset : 8; + /* reserved */ + u32 route_to_ucode : 2; +#else + /* reserved */ + u32 route_to_ucode : 2; + /* + * payload offset. 
it is the start position to calculate tcp/udp + * checksum or sctp crc + */ + u32 payload_offset : 8; + /* + * for udp packet, engine read the whole udp packet from host by 1 dma + * read, and ipsu calculate udp checksum, ucode do ip segment + */ + u32 ufo : 1; + /* + * transmit segmentation offload is activated when the tso flag is set + */ + u32 tso : 1; + /* when set, hi1822 calculates the tcp/udp check sum of the packet */ + u32 tcp_udp_cs : 1; + /* mss */ + u32 mss : 14; + /* sctp packet, engine pass sctp to ucode */ + u32 sctp : 1; + /* unicast flag, engine pass uc to ucode */ + u32 uc : 1; + /* packet priority, engine pass pri to ucode */ + u32 pri : 3; +#endif +}; + +struct hifc_root_db_addr_s { + unsigned long long phy_addr; + void __iomem *virt_map_addr; +}; + +/* send queue management structure */ +struct hifc_root_sq_info_s { + spinlock_t root_sq_spin_lock; + + unsigned short qid; + unsigned short max_qnum; + unsigned short pi; /* ring buffer Pi */ + unsigned short ci; /* ring buffer Ci */ + unsigned short owner; + unsigned short hardware_write_back_value; + unsigned short q_depth; + unsigned short wqe_bb_size; /* WQE Basic size */ + + char irq_name[HIFC_IRQ_NAME_MAX]; + unsigned int irq_id; + unsigned short msix_entry_idx; + + unsigned short *ci_addr; + dma_addr_t ci_dma_addr; + + unsigned long long cla_addr; + void *sq_handle; + struct hifc_root_db_addr_s direct_db; + struct hifc_root_db_addr_s normal_db; + unsigned int db_idx; + unsigned int global_qpn; + int in_flush; + void *root_info; +}; + +struct hifc_root_rq_info_s { + unsigned short qid; + unsigned short max_qnum; + unsigned short pi; + unsigned short ci; + unsigned short owner; + + unsigned short q_depth; + unsigned short q_mask; + unsigned short wqe_bb_size; + + char irq_name[HIFC_IRQ_NAME_MAX]; + unsigned int irq_id; + unsigned short msix_entry_idx; + + unsigned short *pi_vir_addr; + dma_addr_t pi_dma_addr; + + /* Root RQ Receive Buffer size and completion buff */ + unsigned int rqc_buff_size; + void *rq_completion_buff; + dma_addr_t rq_completion_dma; + unsigned int rq_rcv_buff_size; + void *rq_rcv_buff; + dma_addr_t rq_rcv_dma; + void *rq_handle; + + /* for queue context init */ + unsigned long long ci_cla_tbl_addr; + + unsigned int global_qpn; + struct tasklet_struct tasklet; + atomic_t flush_state; + + void *root_info; +}; + +struct hifc_root_info_s { + void *phba; + unsigned int sq_num; + unsigned int sq_ci_table_size; + void *virt_sq_ci_table_buff; + dma_addr_t sq_ci_table_dma; + void *sq_info; + + unsigned int rq_num; + unsigned int rq_pi_table_size; + void *virt_rq_pi_table_buff; + dma_addr_t rq_pi_table_dma; + void *rq_info; +}; + +/**************************** SCQ defines ********************************/ +struct hifc_scq_info_s { + struct cqm_queue_s *cqm_scq_info; + unsigned int wqe_num_per_buf; + unsigned int wqe_size; + /* 0-256, 1-512, 2-1k, 3-2k, 4-4k, 5-8k, 6-16k, 7-32k */ + unsigned int scqc_cq_depth; + unsigned short scqc_ci_type; + unsigned short valid_wqe_num; /* ScQ depth include link wqe */ + unsigned short ci; + unsigned short ci_owner; + + unsigned int queue_id; + unsigned int scqn; + char irq_name[HIFC_IRQ_NAME_MAX]; + unsigned short msix_entry_idx; + unsigned int irq_id; + struct tasklet_struct tasklet; + atomic_t flush_state; + + void *phba; + unsigned int reserved; + struct task_struct *delay_task; + int task_exit; + unsigned int intrmode; +}; + +/************************* SRQ depth ***********************************/ +struct hifc_srq_ctx_s { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + 
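+	/* Big-endian view of the hardware SRQ context (64-bit words DW0..DW7);
+	 * the #else branch below carries the little-endian equivalent.
+	 */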
/* DW0 */ + unsigned long long last_rq_pmsn : 16; + unsigned long long cur_rqe_msn : 16; + unsigned long long cur_rqe_user_id : 16; + unsigned long long parity : 8; + unsigned long long rsvd0 : 2; + unsigned long long pcie_template : 6; + + /* DW1 */ + unsigned long long cur_rqe_gpa; + + /* DW2 */ + unsigned long long cur_sge_v : 1; + unsigned long long cur_sge_l : 1; + unsigned long long int_mode : 2; + unsigned long long ceqn_msix : 11; + unsigned long long cur_sge_remain_len : 17; + unsigned long long cur_sge_id : 4; + unsigned long long consant_sge_len : 17; + unsigned long long cur_wqe : 1; + unsigned long long pmsn_type : 1; + unsigned long long bdsl : 4; + unsigned long long cr : 1; + unsigned long long csl : 2; + unsigned long long cf : 1; + unsigned long long ctrl_sl : 1; + + /* DW3 */ + unsigned long long cur_sge_gpa; + + /* DW4 */ + unsigned long long cur_pmsn_gpa; + + /* DW5 */ + unsigned long long pre_fetch_max_msn : 16; + unsigned long long cqe_max_cnt : 8; + unsigned long long cur_cqe_cnt : 8; + unsigned long long arm_q : 1; + unsigned long long rsvd1 : 7; + unsigned long long cq_so_ro : 2; + unsigned long long cqe_dma_attr_idx : 6; + unsigned long long rq_so_ro : 2; + unsigned long long rqe_dma_attr_idx : 6; + unsigned long long rsvd2 : 1; + unsigned long long loop_o : 1; + unsigned long long ring : 1; + unsigned long long rsvd3 : 5; + +#else + /* DW0 */ + unsigned long long pcie_template : 6; + unsigned long long rsvd0 : 2; + unsigned long long parity : 8; + unsigned long long cur_rqe_user_id : 16; + unsigned long long cur_rqe_msn : 16; + unsigned long long last_rq_pmsn : 16; + + /* DW1 */ + unsigned long long cur_rqe_gpa; + + /* DW2 */ + unsigned long long ctrl_sl : 1; + unsigned long long cf : 1; + unsigned long long csl : 2; + unsigned long long cr : 1; + unsigned long long bdsl : 4; + unsigned long long pmsn_type : 1; + unsigned long long cur_wqe : 1; + unsigned long long consant_sge_len : 17; + unsigned long long cur_sge_id : 4; + unsigned long long cur_sge_remain_len : 17; + unsigned long long ceqn_msix : 11; + unsigned long long int_mode : 2; + unsigned long long cur_sge_l : 1; + unsigned long long cur_sge_v : 1; + + /* DW3 */ + unsigned long long cur_sge_gpa; + + /* DW4 */ + unsigned long long cur_pmsn_gpa; + + /* DW5 */ + unsigned long long rsvd3 : 5; + unsigned long long ring : 1; + unsigned long long loop_o : 1; + unsigned long long rsvd2 : 1; + unsigned long long rqe_dma_attr_idx : 6; + unsigned long long rq_so_ro : 2; + unsigned long long cqe_dma_attr_idx : 6; + unsigned long long cq_so_ro : 2; + unsigned long long rsvd1 : 7; + unsigned long long arm_q : 1; + unsigned long long cur_cqe_cnt : 8; + unsigned long long cqe_max_cnt : 8; + unsigned long long pre_fetch_max_msn : 16; + +#endif + + /* DW6~DW7 */ + unsigned long long rsvd4; + unsigned long long rsvd5; + +}; + +struct hifc_srq_buff_entry_s { + unsigned short buff_id; + void *buff_addr; + dma_addr_t buff_dma; +}; + +enum hifc_clean_state_e { + HIFC_CLEAN_DONE, + HIFC_CLEAN_DOING, + HIFC_CLEAN_BUTT +}; + +enum hifc_srq_type_e { + HIFC_SRQ_ELS = 1, + HIFC_SRQ_BUTT +}; + +struct hifc_srq_info_s { + enum hifc_srq_type_e srq_type; + + struct cqm_queue_s *cqm_srq_info; + /* Wqe number per buf, dont't inlcude link wqe */ + unsigned int wqe_num_per_buf; + unsigned int wqe_size; + /* valid wqe number, dont't include link wqe */ + unsigned int valid_wqe_num; + unsigned short pi; + unsigned short pi_owner; + unsigned short pmsn; + unsigned short ci; + unsigned short cmsn; + unsigned int srqn; + + dma_addr_t 
first_rqe_rcv_dma; + + struct hifc_srq_buff_entry_s *els_buff_entry_head; + struct buf_describe_s buff_list; + spinlock_t srq_spin_lock; + int spin_lock_init; + int enable; + enum hifc_clean_state_e state; + struct delayed_work del_work; + unsigned int del_retry_time; + void *phba; +}; + +/* + * The doorbell record keeps PI of WQE, which will be produced next time. + * The PI is 15 bits width o-bit + */ +struct hifc_db_record { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u64 rsvd0 : 32; + unsigned long long dump_pmsn : 16; + unsigned long long pmsn : 16; +#else + unsigned long long pmsn : 16; + unsigned long long dump_pmsn : 16; + u64 rsvd0 : 32; +#endif +}; + +/* + * The ci record keeps CI of WQE, which will be consumed next time. + * The ci is 15 bits width with 1 o-bit + */ +struct hifc_ci_record_s { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u64 rsvd0 : 32; + unsigned long long dump_cmsn : 16; + unsigned long long cmsn : 16; +#else + unsigned long long cmsn : 16; + unsigned long long dump_cmsn : 16; + u64 rsvd0 : 32; +#endif +}; + +/* The accumulate data in WQ header */ +struct hifc_accumulate { + u64 data_2_uc; + u64 data_2_drv; +}; + +/* The WQ header structure */ +struct hifc_wq_header_s { + struct hifc_db_record db_record; + struct hifc_ci_record_s ci_record; + struct hifc_accumulate soft_data; + +}; + +/* Link list Sq WqePage Pool */ +/* queue header struct */ +struct hifc_queue_header_s { + unsigned long long doorbell_record; + unsigned long long ci_record; + unsigned long long ulrsv1; + unsigned long long ulrsv2; +}; + +/* WPG-WQEPAGE, LLSQ-LINKED LIST SQ */ +struct hifc_sq_wqe_page_s { + struct list_head entry_wpg; + /* Wqe Page virtual addr */ + void *wpg_addr; + /* Wqe Page physical addr */ + unsigned long long wpg_phy_addr; +}; + +struct hifc_sq_wqe_page_pool_s { + unsigned int wpg_cnt; + unsigned int wpg_size; + unsigned int wqe_per_wpg; + + /* PCI DMA Pool */ + struct dma_pool *wpg_dma_pool; + struct hifc_sq_wqe_page_s *wpg_pool_addr; + struct list_head list_free_wpg_pool; + spinlock_t wpg_pool_lock; + atomic_t wpg_in_use; +}; + +#define HIFC_SQ_DEL_STAGE_TIMEOUT_MS (3 * 1000) +#define HIFC_SRQ_DEL_STAGE_TIMEOUT_MS (10 * 1000) +#define HIFC_SQ_WAIT_FLUSH_DONE_TIMEOUT_MS (10) +#define HIFC_SQ_WAIT_FLUSH_DONE_TIMEOUT_CNT (3) + +#define HIFC_SRQ_PROCESS_DELAY_MS (20) + +/* PLOGI parameters */ +struct hifc_plogi_coparams_s { + unsigned int seq_cnt : 1; + unsigned int ed_tov : 1; + unsigned int reserved : 14; + unsigned int tx_mfs : 16; + unsigned int ed_tov_timer_val; +}; + +struct hifc_delay_sqe_ctrl_info_s { + int valid; + unsigned int rport_index; + unsigned int time_out; + unsigned long long start_jiff; + unsigned int sid; + unsigned int did; + struct hifc_root_sqe_s sqe; +}; + +struct hifc_destroy_ctrl_info_s { + int valid; + unsigned int rport_index; + unsigned int time_out; + unsigned long long start_jiff; + struct unf_rport_info_s rport_info; +}; + +/* PARENT SQ Info */ +struct hifc_parent_sq_info_s { + void *phba; + + spinlock_t parent_sq_enqueue_lock; + atomic_t wqe_page_cnt; + unsigned int rport_index; + + unsigned int context_id; + + /* Fixed value,used for Doorbell */ + unsigned int sq_queue_id; + + /* When a session is offloaded, tile will return the CacheId to the + * driver,which is used for Doorbell + */ + unsigned int cache_id; + + /* service type, fc */ + unsigned int service_type; + + /* OQID */ + unsigned short oqid_rd; + unsigned short oqid_wr; + + unsigned int max_sqe_num; /* SQ depth */ + unsigned int wqe_num_per_buf; + unsigned int wqe_size; + + unsigned 
int wqe_offset; + unsigned short head_start_cmsn; + unsigned short head_end_cmsn; + unsigned short last_pmsn; + unsigned short last_pi_owner; + + unsigned int local_port_id; + unsigned int remote_port_id; + int port_in_flush; + int sq_in_sess_rst; + atomic_t sq_valid; + + void *queue_header_original; + struct hifc_queue_header_s *queue_header; + dma_addr_t queue_hdr_phy_addr_original; + dma_addr_t queue_hdr_phy_addr; + + /* Linked List SQ */ + struct list_head list_linked_list_sq; + + unsigned char vport_id; + struct delayed_work del_work; + struct delayed_work flush_done_tmo_work; + unsigned long long del_start_jiff; + dma_addr_t srq_ctx_addr; + atomic_t sq_cashed; + atomic_t fush_done_wait_cnt; + + struct hifc_plogi_coparams_s plogi_coparams; + + /* dif control info for immi */ + struct unf_dif_control_info_s sirt_dif_control; + + atomic_t sq_dbl_cnt; + atomic_t sq_wqe_cnt; + atomic_t sq_cqe_cnt; + atomic_t sqe_minus_cqe_cnt; + + struct hifc_delay_sqe_ctrl_info_s delay_sqe; + struct hifc_destroy_ctrl_info_s destroy_sqe; + atomic_t io_stat[HIFC_MAX_SQ_TASK_TYPE_CNT]; + +}; + +/* parent context doorbell */ +struct hifc_parent_sq_db_s { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 service_type : 5; + u32 cos : 3; + u32 c : 1; + u32 arm : 1; + u32 cntx_size : 2; + u32 vport : 7; + u32 xid : 13; +#else + u32 xid : 13; + u32 vport : 7; + u32 cntx_size : 2; + u32 arm : 1; + u32 c : 1; + u32 cos : 3; + u32 service_type : 5; +#endif + } wd0; + + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 qid : 4; + u32 sm_data : 20; + u32 pi_hi : 8; +#else + u32 pi_hi : 8; + u32 sm_data : 20; + u32 qid : 4; +#endif + } wd1; + +}; + +struct hifc_parent_cmd_scq_info_s { + unsigned int cqm_queue_id; + unsigned int local_queue_id; +}; + +struct hifc_parent_st_scq_info_s { + unsigned int cqm_queue_id; + unsigned int local_queue_id; +}; + +struct hifc_parent_els_srq_info_s { + unsigned int cqm_queue_id; + unsigned int local_queue_id; +}; + +enum hifc_parent_queue_state_e { + HIFC_QUEUE_STATE_INITIALIZED = 0, + HIFC_QUEUE_STATE_OFFLOADING = 1, + HIFC_QUEUE_STATE_OFFLOADED = 2, + HIFC_QUEUE_STATE_DESTROYING = 3, + HIFC_QUEUE_STATE_FREE = 4, + HIFC_QUEUE_STATE_BUTT +}; + +struct hifc_parent_ctx_s { + dma_addr_t parent_ctx; + /* Allocated by the driver, which fills it when a session is offloaded */ + void *virt_parent_ctx; + /* Allocated by CQM, used by hardware */ + struct cqm_qpc_mpt_s *cqm_parent_ctx_obj; +}; + +struct hifc_parent_queue_info_s { + spinlock_t parent_queue_state_lock; + struct hifc_parent_ctx_s parent_ctx; + enum hifc_parent_queue_state_e offload_state; + struct hifc_parent_sq_info_s parent_sq_info; + /* Cmd Scq info which is associated with parent queue */ + struct hifc_parent_cmd_scq_info_s parent_cmd_scq_info; + /* Sts Scq info which is associated with parent queue */ + struct hifc_parent_st_scq_info_s parent_sts_scq_info; + /* ELS Srq info which is associated with parent queue */ + unsigned char queue_vport_id; + struct hifc_parent_els_srq_info_s parent_els_srq_info; + unsigned char queue_data_cos; +}; + +struct hifc_parent_queue_mgr_s { + struct hifc_parent_queue_info_s parent_queues[UNF_HIFC_MAXRPORT_NUM]; + struct buf_describe_s parent_sq_buf_list; +}; + +struct hifc_get_global_base_qpn_s { + /* for new version interface */ + unsigned char status; + unsigned char version; + unsigned char rsvd0[6]; + + unsigned short func_id; + unsigned short base_qpn; +}; + +#define HIFC_SRQC_BUS_ROW 8 +#define HIFC_SRQC_BUS_COL 19 +#define HIFC_SQC_BUS_ROW 8 +#define HIFC_SQC_BUS_COL 13 +#define
HIFC_HW_SCQC_BUS_ROW 6 +#define HIFC_HW_SCQC_BUS_COL 10 +#define HIFC_HW_SRQC_BUS_ROW 4 +#define HIFC_HW_SRQC_BUS_COL 15 +#define HIFC_SCQC_BUS_ROW 3 +#define HIFC_SCQC_BUS_COL 29 + +#define HIFC_QUEUE_INFO_BUS_NUM 4 +struct hifc_queue_info_bus_s { + unsigned long long bus[HIFC_QUEUE_INFO_BUS_NUM]; +}; + +unsigned int hifc_free_parent_resource(void *v_hba, + struct unf_rport_info_s *v_rport_info); +unsigned int hifc_alloc_parent_resource(void *v_hba, + struct unf_rport_info_s *v_rport_info); +unsigned int hifc_create_root_queues(void *v_hba); +void hifc_destroy_root_queues(void *v_hba); +unsigned int hifc_alloc_parent_queue_mgr(void *v_hba); +void hifc_free_parent_queue_mgr(void *v_hba); +unsigned int hifc_create_common_share_queues(void *v_hba); +void hifc_destroy_common_share_queues(void *v_hba); +unsigned int hifc_alloc_parent_sq_wqe_page_pool(void *v_hba); +void hifc_free_parent_sq_wqe_page_pool(void *v_hba); + +struct hifc_parent_queue_info_s *hifc_find_parent_queue_info_by_pkg( + void *v_hba, + struct unf_frame_pkg_s *v_pkg); +struct hifc_parent_sq_info_s *hifc_find_parent_sq_by_pkg( + void *v_hba, struct unf_frame_pkg_s *v_pkg); +struct hifc_parent_ctx_s *hifc_get_parnt_ctx_virt_addr_by_pkg( + void *v_hba, + struct unf_frame_pkg_s *v_pkg); +unsigned int hifc_get_parent_ctx_xid_by_pkg(void *v_hba, + struct unf_frame_pkg_s *v_pkg); + +unsigned int hifc_root_sq_enqueue(void *v_hba, + struct hifc_root_sqe_s *v_sqe); +void hifc_process_root_rqe(unsigned long v_rq_info); + +unsigned int hifc_root_cmdq_enqueue(void *v_hba, + union hifc_cmdqe_u *v_cmd_qe, + unsigned short v_cmd_len); + +void hifc_process_scq_cqe(unsigned long scq_info); +unsigned int hifc_process_scq_cqe_entity(unsigned long v_scq_info, + unsigned int proc_cnt); + +void hifc_post_els_srq_wqe(struct hifc_srq_info_s *v_srq_info, + unsigned short buf_id); +void hifc_process_aeqe(void *v_srv_handle, unsigned char evt_type, u64 evt_val); + +unsigned int hifc_parent_sq_enqueue(struct hifc_parent_sq_info_s *v_sq, + struct hifcoe_sqe_s *v_sqe); +void hifc_free_sq_wqe_page(struct hifc_parent_sq_info_s *v_sq, + unsigned int cur_cmsn); +unsigned int hifc_reclaim_sq_wqe_page(void *v_hba, union hifcoe_scqe_u *v_scqe); + +void hifc_set_root_sq_flush_state(void *v_hba, int in_flush); +void hifc_set_rport_flush_state(void *v_hba, int in_flush); +unsigned int hifc_clear_fetched_sq_wqe(void *v_hba); +unsigned int hifc_clear_pending_sq_wqe(void *v_hba); + +void hifc_free_parent_queues(void *v_hba); +void hifc_enable_queues_dispatch(void *v_hba); +void hifc_queue_pre_process(void *v_hba, int v_clean); +void hifc_free_parent_queue_info( + void *v_hba, + struct hifc_parent_queue_info_s *v_parent_queue_info); +unsigned int hifc_send_session_rst_cmd( + void *v_hba, + struct hifc_parent_queue_info_s *v_parent_queue_info, + unsigned int v_mode); +void hifc_build_session_rst_wqe(void *v_hba, + struct hifc_parent_sq_info_s *v_sq, + struct hifcoe_sqe_s *v_sqe, + enum hifc_session_reset_mode_e v_mode, + unsigned int scqn); + +unsigned int hifc_rport_session_rst(void *v_hba, + struct unf_rport_info_s *v_rport_info); +unsigned int hifc_get_rport_maped_cmd_scqn(void *v_hba, + unsigned int rport_index); +unsigned int hifc_get_rport_maped_sts_scqn(void *v_hba, + unsigned int rport_index); + +void hifc_destroy_srq(void *v_hba); +unsigned int hifc_push_delay_sqe( + void *v_hba, + struct hifc_parent_queue_info_s *v_offload_parent_queue, + struct hifc_root_sqe_s *v_sqe, + struct unf_frame_pkg_s *v_pkg); + +void hifc_push_destroy_parent_queue_sqe( + void *v_hba, 
+ struct hifc_parent_queue_info_s *v_offload_parent_queue, + struct unf_rport_info_s *v_rport_info); +void hifc_pop_destroy_parent_queue_sqe( + void *v_hba, + struct hifc_destroy_ctrl_info_s *v_destroy_sqe_info); +struct hifc_parent_queue_info_s *hifc_find_offload_parent_queue( + void *v_hba, + unsigned int v_local_id, + unsigned int v_remote_id, + unsigned int v_rport_index); + +unsigned int hifc_flush_ini_resp_queue(void *v_hba); +void hifc_rcvd_els_from_srq_time_out(struct work_struct *work); +#endif diff --git a/drivers/scsi/huawei/hifc/unf_lport.c b/drivers/scsi/huawei/hifc/unf_lport.c new file mode 100644 index 000000000000..09986f177fcc --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_lport.c @@ -0,0 +1,1129 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#include "unf_log.h" +#include "unf_common.h" +#include "unf_event.h" +#include "unf_lport.h" +#include "unf_rport.h" +#include "unf_exchg.h" +#include "unf_service.h" +#include "unf_portman.h" +#include "unf_npiv.h" + +static void unf_lport_timeout(struct work_struct *work); + +void unf_cmmark_dirty_mem(struct unf_lport_s *v_lport, + enum unf_lport_dirty_flag_e v_etype) +{ + UNF_CHECK_VALID(0x1801, UNF_TRUE, v_lport, return); + + v_lport->dirty_flag |= v_etype; +} + +unsigned int unf_init_lport_route(struct unf_lport_s *v_lport) +{ + int ret = 0; + + UNF_CHECK_VALID(0x1802, UNF_TRUE, + v_lport, return UNF_RETURN_ERROR); + + /* Init L_Port route work */ + INIT_DELAYED_WORK(&v_lport->route_timer_work, unf_lport_route_work); + + /* Delay route work */ + ret = queue_delayed_work( + unf_work_queue, + &v_lport->route_timer_work, + (unsigned long)msecs_to_jiffies(UNF_LPORT_POLL_TIMER)); + if (unlikely(ret == UNF_FALSE)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_WARN, + "[warn]Port(0x%x) schedule route work failed", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + return unf_lport_refinc(v_lport); +} + +void unf_destroy_lport_route(struct unf_lport_s *v_lport) +{ + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x1803, UNF_TRUE, v_lport, return); + + /* Cancel (route timer) delay work */ + UNF_DELAYED_WORK_SYNC(ret, v_lport->port_id, + &v_lport->route_timer_work, + "Route Timer work"); + if (ret == RETURN_OK) { + /* Corresponding to ADD operation */ + unf_lport_ref_dec(v_lport); + } + + v_lport->destroy_step = UNF_LPORT_DESTROY_STEP_2_CLOSE_ROUTE; +} + +static void unf_lport_config(struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x1816, UNF_TRUE, v_lport, return); + + INIT_DELAYED_WORK(&v_lport->retry_work, unf_lport_timeout); + + v_lport->max_retry_count = UNF_MAX_RETRY_COUNT; /* 3 */ + v_lport->retries = 0; +} + +void unf_init_portparms(struct unf_lport_s *v_lport) +{ + INIT_LIST_HEAD(&v_lport->list_vports_head); + INIT_LIST_HEAD(&v_lport->list_intergrad_vports); + INIT_LIST_HEAD(&v_lport->list_destroy_vports); + INIT_LIST_HEAD(&v_lport->entry_lport); + spin_lock_init(&v_lport->lport_state_lock); + + v_lport->max_frame_size = max_frame_size; + v_lport->ed_tov = UNF_DEFAULT_EDTOV; + v_lport->ra_tov = UNF_DEFAULT_RATOV; + v_lport->rr_tov = UNF_DEFAULT_RRTOV; + v_lport->fabric_node_name = 0; + v_lport->b_priority = UNF_PRIORITY_DISABLE; + v_lport->b_port_dir_exchange = UNF_FALSE; + /* Delay (retry) work init */ + unf_lport_config(v_lport); + + unf_set_lport_state(v_lport, UNF_LPORT_ST_ONLINE); /* online */ + + v_lport->link_up = UNF_PORT_LINK_DOWN; + v_lport->b_port_removing = UNF_FALSE; + 
v_lport->lport_free_completion = NULL; + v_lport->last_tx_fault_jif = 0; + v_lport->enhanced_features = 0; + v_lport->destroy_step = INVALID_VALUE32; + v_lport->dirty_flag = 0; + v_lport->b_switch_state = UNF_FALSE; + v_lport->b_bbscn_support = UNF_FALSE; + + v_lport->en_start_work_state = UNF_START_WORK_STOP; + v_lport->sfp_power_fault_count = 0; + v_lport->sfp_9545_fault_count = 0; + + atomic_set(&v_lport->port_no_operater_flag, UNF_LPORT_NORMAL); + atomic_set(&v_lport->lport_ref_cnt, 0); + atomic_set(&v_lport->scsi_session_add_success, 0); + atomic_set(&v_lport->scsi_session_add_failed, 0); + atomic_set(&v_lport->scsi_session_del_success, 0); + atomic_set(&v_lport->scsi_session_del_failed, 0); + atomic_set(&v_lport->add_start_work_failed, 0); + atomic_set(&v_lport->add_closing_work_failed, 0); + atomic_set(&v_lport->alloc_scsi_id, 0); + atomic_set(&v_lport->resume_scsi_id, 0); + atomic_set(&v_lport->reuse_scsi_id, 0); + atomic_set(&v_lport->device_alloc, 0); + atomic_set(&v_lport->device_destroy, 0); + atomic_set(&v_lport->session_loss_tmo, 0); + + atomic64_set(&v_lport->exchg_index, 1); + atomic_inc(&v_lport->lport_ref_cnt); + atomic_set(&v_lport->err_code_obtain_freq, 0); + + memset(&v_lport->link_service_info, 0, + sizeof(struct unf_link_service_collect_s)); + memset(&v_lport->err_code_sum, 0, sizeof(struct unf_err_code_s)); +} + +void unf_reset_lport_params(struct unf_lport_s *v_lport) +{ + struct unf_lport_s *lport = v_lport; + + UNF_CHECK_VALID(0x1804, UNF_TRUE, v_lport, return); + + lport->link_up = UNF_PORT_LINK_DOWN; + lport->nport_id = 0; /* Need do FLOGI again to clear N_Port_ID */ + lport->max_frame_size = max_frame_size; + lport->ed_tov = UNF_DEFAULT_EDTOV; + lport->ra_tov = UNF_DEFAULT_RATOV; + lport->rr_tov = UNF_DEFAULT_RRTOV; + lport->fabric_node_name = 0; +} + +static enum unf_lport_login_state_e unf_lport_stat_online( + enum unf_lport_login_state_e old_state, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_LINK_UP: + /* EVENT_LINK_UP --->>> ST_LINK_UP */ + next_state = UNF_LPORT_ST_LINK_UP; + break; + + case UNF_EVENT_LPORT_NORMAL_ENTER: + /* EVENT_NORMAL_ENTER --->>> ST_INITIAL */ + next_state = UNF_LPORT_ST_INITIAL; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +static enum unf_lport_login_state_e unf_lport_stat_initial( + enum unf_lport_login_state_e old_state, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_LINK_UP: + /* EVENT_LINK_UP --->>> ST_LINK_UP */ + next_state = UNF_LPORT_ST_LINK_UP; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +static enum unf_lport_login_state_e unf_lport_stat_linkup( + enum unf_lport_login_state_e old_state, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_NORMAL_ENTER: + /* EVENT_NORMAL_ENTER --->>> FLOGI_WAIT */ + next_state = UNF_LPORT_ST_FLOGI_WAIT; + break; + + case UNF_EVENT_LPORT_READY: + /* EVENT_READY --->>> ST_READY */ + next_state = UNF_LPORT_ST_READY; + break; + + case UNF_EVENT_LPORT_LINK_DOWN: + /* EVENT_LINK_DOWN --->>> ST_INITIAL */ + next_state = UNF_LPORT_ST_INITIAL; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +static enum unf_lport_login_state_e unf_lport_stat_flogi_wait( + enum unf_lport_login_state_e 
old_state, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_REMOTE_ACC: + /* EVENT_REMOTE_ACC --->>> ST_PLOGI_WAIT */ + next_state = UNF_LPORT_ST_PLOGI_WAIT; + break; + + case UNF_EVENT_LPORT_READY: + /* EVENT_READY --->>> ST_READY */ + next_state = UNF_LPORT_ST_READY; + break; + + case UNF_EVENT_LPORT_REMOTE_TIMEOUT: + /* EVENT_REMOTE_TIMEOUT --->>> ST_LOGO */ + next_state = UNF_LPORT_ST_LOGO; + break; + + case UNF_EVENT_LPORT_LINK_DOWN: + /* EVENT_LINK_DOWN --->>> ST_INITIAL */ + next_state = UNF_LPORT_ST_INITIAL; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +static enum unf_lport_login_state_e unf_lport_stat_plogi_wait( + enum unf_lport_login_state_e old_state, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_REMOTE_ACC: + /* EVENT_REMOTE_ACC --->>> ST_RFT_ID_WAIT */ + next_state = UNF_LPORT_ST_RFT_ID_WAIT; + break; + + case UNF_EVENT_LPORT_REMOTE_TIMEOUT: + /* EVENT_TIMEOUT --->>> ST_LOGO */ + next_state = UNF_LPORT_ST_LOGO; + break; + + case UNF_EVENT_LPORT_LINK_DOWN: + /* EVENT_LINK_DOWN --->>> ST_INITIAL */ + next_state = UNF_LPORT_ST_INITIAL; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +static enum unf_lport_login_state_e unf_lport_stat_rftid_wait( + enum unf_lport_login_state_e old_state, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_REMOTE_ACC: + /* EVENT_REMOTE_ACC --->>> ST_RFF_ID_WAIT */ + next_state = UNF_LPORT_ST_RFF_ID_WAIT; + break; + + case UNF_EVENT_LPORT_REMOTE_TIMEOUT: + /* EVENT_TIMEOUT --->>> ST_LOGO */ + next_state = UNF_LPORT_ST_LOGO; + break; + + case UNF_EVENT_LPORT_LINK_DOWN: + /* EVENT_LINK_DOWN --->>> ST_INITIAL */ + next_state = UNF_LPORT_ST_INITIAL; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +static enum unf_lport_login_state_e unf_lport_stat_rffid_wait( + enum unf_lport_login_state_e old_state, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_REMOTE_ACC: + /* EVENT_REMOTE_ACC --->>> ST_SCR_WAIT */ + next_state = UNF_LPORT_ST_SCR_WAIT; + break; + + case UNF_EVENT_LPORT_REMOTE_TIMEOUT: + /* EVENT_TIMEOUT --->>> ST_LOGO */ + next_state = UNF_LPORT_ST_LOGO; + break; + + case UNF_EVENT_LPORT_LINK_DOWN: + /* EVENT_LINK_DOWN --->>> ST_INITIAL */ + next_state = UNF_LPORT_ST_INITIAL; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +static enum unf_lport_login_state_e unf_lport_state_scr_wait( + enum unf_lport_login_state_e old_state, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_REMOTE_ACC: + /* EVENT_REMOTE_ACC --->>> ST_READY */ + next_state = UNF_LPORT_ST_READY; + break; + + case UNF_EVENT_LPORT_REMOTE_TIMEOUT: + /* EVENT_TIMEOUT --->>> ST_LOGO */ + next_state = UNF_LPORT_ST_LOGO; + break; + + case UNF_EVENT_LPORT_LINK_DOWN: + /* EVENT_LINK_DOWN --->>> ST_INITIAL */ + next_state = UNF_LPORT_ST_INITIAL; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +static enum unf_lport_login_state_e unf_lport_state_logo( + enum unf_lport_login_state_e old_state, + enum unf_lport_event_e 
event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_NORMAL_ENTER: + /* EVENT_NORMAL_ENTER --->>> ST_OFFLINE */ + next_state = UNF_LPORT_ST_OFFLINE; + break; + + case UNF_EVENT_LPORT_LINK_DOWN: + /* EVENT_LINK_DOWN --->>> ST_INITIAL */ + next_state = UNF_LPORT_ST_INITIAL; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +static enum unf_lport_login_state_e unf_lport_state_offline( + enum unf_lport_login_state_e old_state, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_ONLINE: + /* EVENT_ONLINE --->>> ST_ONLINE */ + next_state = UNF_LPORT_ST_ONLINE; + break; + + case UNF_EVENT_LPORT_RESET: + /* EVENT_RESET --->>> ST_RESET */ + next_state = UNF_LPORT_ST_RESET; + break; + + case UNF_EVENT_LPORT_LINK_DOWN: + /* EVENT_LINK_DOWN --->>> ST_INITIAL */ + next_state = UNF_LPORT_ST_INITIAL; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +static enum unf_lport_login_state_e unf_lport_state_reset( + enum unf_lport_login_state_e old_state, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_NORMAL_ENTER: + /* EVENT_NORMAL_ENTER --->>> ST_INITIAL */ + next_state = UNF_LPORT_ST_INITIAL; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +static enum unf_lport_login_state_e unf_lport_state_ready( + enum unf_lport_login_state_e old_state, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_LINK_DOWN: + /* EVENT_LINK_DOWN --->>> ST_INITIAL */ + next_state = UNF_LPORT_ST_INITIAL; + break; + + case UNF_EVENT_LPORT_RESET: + /* EVENT_RESET --->>> ST_RESET */ + next_state = UNF_LPORT_ST_RESET; + break; + + case UNF_EVENT_LPORT_OFFLINE: + /* EVENT_OFFLINE --->>> ST_LOGO */ + next_state = UNF_LPORT_ST_LOGO; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +void unf_lport_stat_ma(struct unf_lport_s *v_lport, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e old_state = UNF_LPORT_ST_ONLINE; + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + UNF_CHECK_VALID(0x1805, UNF_TRUE, v_lport, return); + + old_state = v_lport->en_states; + switch (v_lport->en_states) { + case UNF_LPORT_ST_ONLINE: + next_state = unf_lport_stat_online(old_state, event); + break; + + case UNF_LPORT_ST_INITIAL: + next_state = unf_lport_stat_initial(old_state, event); + break; + + case UNF_LPORT_ST_LINK_UP: + next_state = unf_lport_stat_linkup(old_state, event); + break; + + case UNF_LPORT_ST_FLOGI_WAIT: + next_state = unf_lport_stat_flogi_wait(old_state, event); + break; + + case UNF_LPORT_ST_PLOGI_WAIT: + next_state = unf_lport_stat_plogi_wait(old_state, event); + break; + + case UNF_LPORT_ST_RFT_ID_WAIT: + next_state = unf_lport_stat_rftid_wait(old_state, event); + break; + + case UNF_LPORT_ST_RFF_ID_WAIT: + next_state = unf_lport_stat_rffid_wait(old_state, event); + break; + + case UNF_LPORT_ST_SCR_WAIT: + next_state = unf_lport_state_scr_wait(old_state, event); + break; + + case UNF_LPORT_ST_LOGO: + next_state = unf_lport_state_logo(old_state, event); + break; + + case UNF_LPORT_ST_OFFLINE: + next_state = unf_lport_state_offline(old_state, event); + break; + + case UNF_LPORT_ST_RESET: + next_state = 
unf_lport_state_reset(old_state, event); + break; + + case UNF_LPORT_ST_READY: + next_state = unf_lport_state_ready(old_state, event); + break; + + default: + next_state = old_state; + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) hold state(0x%x)", + v_lport->port_id, v_lport->en_states); + break; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) with old state(0x%x) event(0x%x) next state(0x%x)", + v_lport->port_id, old_state, event, next_state); + + unf_set_lport_state(v_lport, next_state); +} + +unsigned int unf_init_lport_mgr_temp(struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x1806, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + v_lport->lport_mgr_temp.pfn_unf_vport_get_free_and_init = NULL; + v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_vp_index = + unf_lookup_vport_by_vp_index; + v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_port_id = + unf_lookup_vport_by_port_id; + v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_did = + unf_lookup_vport_by_did; + v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_wwpn = + unf_lookup_vport_by_wwpn; + v_lport->lport_mgr_temp.pfn_unf_vport_remove = unf_vport_remove; + return RETURN_OK; +} + +void unf_release_lport_mgr_temp(struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x1807, UNF_TRUE, v_lport, return); + + memset(&v_lport->lport_mgr_temp, 0, + sizeof(struct unf_cm_lport_template_s)); + v_lport->destroy_step = UNF_LPORT_DESTROY_STEP_9_DESTROY_LPORT_MG_TMP; +} + +unsigned int unf_lport_retry_flogi(struct unf_lport_s *v_lport) +{ + struct unf_rport_s *rport = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x1808, UNF_TRUE, + v_lport, return UNF_RETURN_ERROR); + + /* Get (new) R_Port */ + rport = unf_get_rport_by_nport_id(v_lport, UNF_FC_FID_FLOGI); + rport = unf_get_safe_rport(v_lport, rport, + UNF_RPORT_REUSE_ONLY, UNF_FC_FID_FLOGI); + if (unlikely(!rport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) allocate RPort failed", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + /* Check L_Port state */ + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + if (v_lport->en_states != UNF_LPORT_ST_FLOGI_WAIT) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) no need to retry FLOGI with state(0x%x)", + v_lport->port_id, v_lport->en_states); + + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + return RETURN_OK; + } + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = UNF_FC_FID_FLOGI; + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Send FLOGI or FDISC */ + if (v_lport != v_lport->root_lport) { + ret = unf_send_fdisc(v_lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) send FDISC failed", + v_lport->port_id); + + /* Do L_Port recovery */ + unf_lport_error_recovery(v_lport); + } + } else { + ret = unf_send_flogi(v_lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE( + UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) send FLOGI failed\n", + v_lport->port_id); + + /* Do L_Port recovery */ + unf_lport_error_recovery(v_lport); + } + } + + return ret; +} + +unsigned int unf_lport_name_server_register( + struct unf_lport_s *v_lport, + enum unf_lport_login_state_e states) +{ + struct unf_rport_s *rport = NULL; + unsigned long 
flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x1809, UNF_TRUE, + v_lport, return UNF_RETURN_ERROR); + + /* Get (safe) R_Port 0xfffffc */ + rport = unf_get_rport_by_nport_id(v_lport, UNF_FC_FID_DIR_SERV); + rport = unf_get_safe_rport(v_lport, rport, UNF_RPORT_REUSE_ONLY, + UNF_FC_FID_DIR_SERV); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) allocate RPort failed", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + /* Update R_Port & L_Port state */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = UNF_FC_FID_DIR_SERV; /* 0xfffffc */ + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + unf_lport_stat_ma(v_lport, UNF_EVENT_LPORT_NORMAL_ENTER); + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + switch (states) { + /* RFT_ID */ + case UNF_LPORT_ST_RFT_ID_WAIT: + ret = unf_send_rft_id(v_lport, rport); + break; + + /* RFF_ID */ + case UNF_LPORT_ST_RFF_ID_WAIT: + ret = unf_send_rff_id(v_lport, rport); + break; + + /* SCR */ + case UNF_LPORT_ST_SCR_WAIT: + ret = unf_send_scr(v_lport, NULL); + break; + + /* PLOGI */ + case UNF_LPORT_ST_PLOGI_WAIT: + default: + spin_lock_irqsave(&rport->rport_state_lock, flag); + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + ret = unf_send_plogi(v_lport, rport); + break; + } + + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) register fabric(0xfffffc) failed", + v_lport->nport_id); + + /* Do L_Port recovery */ + unf_lport_error_recovery(v_lport); + } + + return ret; +} + +unsigned int unf_lport_enter_sns_logo(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + struct unf_rport_s *rport = NULL; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x1810, UNF_TRUE, + v_lport, return UNF_RETURN_ERROR); + + if (!v_rport) { + rport = unf_get_rport_by_nport_id(v_lport, + UNF_FC_FID_DIR_SERV); + } else { + rport = v_rport; + } + + if (!rport) { + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + unf_lport_stat_ma(v_lport, UNF_EVENT_LPORT_NORMAL_ENTER); + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + return RETURN_OK; + } + + /* Update L_Port & R_Port state */ + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + unf_lport_stat_ma(v_lport, UNF_EVENT_LPORT_NORMAL_ENTER); + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + spin_lock_irqsave(&rport->rport_state_lock, flag); + unf_rport_state_ma(rport, UNF_EVENT_RPORT_LOGO); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Do R_Port LOGO state */ + unf_rport_enter_logo(v_lport, rport); + + return ret; +} + +void unf_lport_enter_sns_plogi(struct unf_lport_s *v_lport) +{ + /* Fabric or Public Loop Mode: Login with Name server */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = NULL; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x1811, UNF_TRUE, v_lport, return); + + /* Get (safe) R_Port 0xfffffc */ + rport = unf_get_rport_by_nport_id(lport, UNF_FC_FID_DIR_SERV); + if (rport) { + /* for port swap: Delete old R_Port if necessary */ + if (rport->local_nport_id != v_lport->nport_id) { + unf_rport_immediate_linkdown(v_lport, rport); + rport = NULL; + } + } + + rport = unf_get_safe_rport(v_lport, rport, + UNF_RPORT_REUSE_ONLY, UNF_FC_FID_DIR_SERV); + if (!rport) { 
+ UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) allocate RPort failed", + v_lport->port_id); + + unf_lport_error_recovery(lport); + return; + } + + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = UNF_FC_FID_DIR_SERV; /* 0xfffffc */ + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Send PLOGI to Fabric(0xfffffc) */ + ret = unf_send_plogi(lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) send PLOGI to name server failed", + v_lport->port_id); + + unf_lport_error_recovery(lport); + } +} + +int unf_get_port_params(void *v_argin, void *v_argout) +{ + struct unf_lport_s *lport = (struct unf_lport_s *)v_argin; + struct unf_low_level_port_mgr_op_s *port_mg = NULL; + struct unf_port_params_s port_params = { 0 }; + int ret = RETURN_OK; + + UNF_REFERNCE_VAR(v_argout); + UNF_CHECK_VALID(0x1812, UNF_TRUE, + v_argin, return UNF_RETURN_ERROR); + + port_mg = &lport->low_level_func.port_mgr_op; + if (!port_mg->pfn_ll_port_config_get) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_WARN, + "[warn]Port(0x%x) low level port_config_get function is NULL", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_INFO, + "[info]Port(0x%x) get parameters with default:R_A_TOV(%d) E_D_TOV(%d)", + lport->port_id, UNF_DEFAULT_FABRIC_RATOV, UNF_DEFAULT_EDTOV); + + port_params.ra_tov = UNF_DEFAULT_FABRIC_RATOV; + port_params.ed_tov = UNF_DEFAULT_EDTOV; + + /* Update parameters with Fabric mode */ + if ((lport->en_act_topo == UNF_ACT_TOP_PUBLIC_LOOP) || + (lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC)) { + lport->ra_tov = port_params.ra_tov; + lport->ed_tov = port_params.ed_tov; + } + + return ret; +} + +unsigned int unf_lport_enter_flogi(struct unf_lport_s *v_lport) +{ + struct unf_rport_s *rport = NULL; + struct unf_cm_event_report *event = NULL; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int nport_id = 0; + + UNF_CHECK_VALID(0x1813, UNF_TRUE, + v_lport, return UNF_RETURN_ERROR); + + /* Get (safe) R_Port */ + nport_id = UNF_FC_FID_FLOGI; /* 0xfffffe */ + rport = unf_get_rport_by_nport_id(v_lport, UNF_FC_FID_FLOGI); + + rport = unf_get_safe_rport(v_lport, rport, + UNF_RPORT_REUSE_ONLY, nport_id); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) allocate RPort failed", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + /* Update L_Port state */ + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + /* LPort: LINK UP --> FLOGI WAIT */ + unf_lport_stat_ma(v_lport, UNF_EVENT_LPORT_NORMAL_ENTER); + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + /* Update R_Port N_Port_ID */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = UNF_FC_FID_FLOGI; /* 0xfffffe */ + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + event = unf_get_one_event_node(v_lport); + if (event) { + event->lport = v_lport; + event->event_asy_flag = UNF_EVENT_ASYN; + /* NULL for timer */ + event->pfn_unf_event_task = unf_get_port_params; + event->para_in = (void *)v_lport; + unf_post_one_event_node(v_lport, event); + } + + if (v_lport != v_lport->root_lport) { + /* for NPIV */ + ret = unf_send_fdisc(v_lport, rport); + if (ret != RETURN_OK) + /* Do L_Port recovery */ + unf_lport_error_recovery(v_lport); + } else { + /* for Physical Port */ + ret = unf_send_flogi(v_lport, rport); + if (ret !=
RETURN_OK) + /* Do L_Port recovery */ + unf_lport_error_recovery(v_lport); + } + + return ret; +} + +void unf_set_lport_state(struct unf_lport_s *v_lport, + enum unf_lport_login_state_e states) +{ + UNF_CHECK_VALID(0x1814, UNF_TRUE, v_lport, return); + if (states != v_lport->en_states) { + /* Reset L_Port retry count */ + v_lport->retries = 0; + } + + v_lport->en_states = states; +} + +static void unf_lport_timeout(struct work_struct *work) +{ + struct unf_lport_s *lport = NULL; + enum unf_lport_login_state_e state = UNF_LPORT_ST_READY; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x1815, UNF_TRUE, work, return); + lport = container_of(work, struct unf_lport_s, retry_work.work); + + spin_lock_irqsave(&lport->lport_state_lock, flag); + state = lport->en_states; + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) is timeout with state(0x%x)", + lport->port_id, state); + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + switch (state) { + /* FLOGI retry */ + case UNF_LPORT_ST_FLOGI_WAIT: + (void)unf_lport_retry_flogi(lport); + break; + + case UNF_LPORT_ST_PLOGI_WAIT: + case UNF_LPORT_ST_RFT_ID_WAIT: + case UNF_LPORT_ST_RFF_ID_WAIT: + case UNF_LPORT_ST_SCR_WAIT: + (void)unf_lport_name_server_register(lport, state); + break; + + /* Send LOGO External */ + case UNF_LPORT_ST_LOGO: + break; + + /* Do nothing */ + case UNF_LPORT_ST_OFFLINE: + case UNF_LPORT_ST_READY: + case UNF_LPORT_ST_RESET: + case UNF_LPORT_ST_ONLINE: + case UNF_LPORT_ST_INITIAL: + case UNF_LPORT_ST_LINK_UP: + + lport->retries = 0; + break; + default: + break; + } + + unf_lport_ref_dec_to_destroy(lport); +} + +void unf_lport_error_recovery(struct unf_lport_s *v_lport) +{ + unsigned long delay = 0; + unsigned long flag = 0; + int ret = 0; + + UNF_CHECK_VALID(0x1817, UNF_TRUE, v_lport, return); + + if (unlikely(unf_lport_refinc(v_lport) != RETURN_OK)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) is removing and no need process", + v_lport->port_id); + return; + } + + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + + /* Port State: removing */ + if (v_lport->b_port_removing == UNF_TRUE) { + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) is removing and no need process", + v_lport->port_id); + + unf_lport_ref_dec_to_destroy(v_lport); + return; + } + + /* Port State: offline */ + if (v_lport->en_states == UNF_LPORT_ST_OFFLINE) { + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) is offline and no need process", + v_lport->port_id); + + unf_lport_ref_dec_to_destroy(v_lport); + return; + } + + /* Queue work state check */ + if (delayed_work_pending(&v_lport->retry_work)) { + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + unf_lport_ref_dec_to_destroy(v_lport); + return; + } + + /* Do retry operation */ + if (v_lport->retries < v_lport->max_retry_count) { + v_lport->retries++; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x_0x%x) enter recovery and retry %u times", + v_lport->port_id, v_lport->nport_id, + v_lport->retries); + + delay = (unsigned long)v_lport->ed_tov; + ret = queue_delayed_work(unf_work_queue, + &v_lport->retry_work, + (unsigned long)msecs_to_jiffies( + (unsigned int)delay)); + if (ret) { + atomic_inc(&v_lport->lport_ref_cnt); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, 
UNF_MAJOR, + "[info]Port(0x%x) queue work success and reference count is %d", + v_lport->port_id, + atomic_read(&v_lport->lport_ref_cnt)); + } + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + } else { + unf_lport_stat_ma(v_lport, UNF_EVENT_LPORT_REMOTE_TIMEOUT); + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) register operation timeout and do LOGO", + v_lport->port_id); + + /* Do L_Port LOGO */ + (void)unf_lport_enter_sns_logo(v_lport, NULL); + } + + unf_lport_ref_dec_to_destroy(v_lport); +} + +struct unf_lport_s *unf_cm_lookup_vport_by_vp_index(struct unf_lport_s *v_lport, + unsigned short v_vp_index) +{ + UNF_CHECK_VALID(0x1819, UNF_TRUE, v_lport, return NULL); + + if (v_vp_index == 0) + return v_lport; + + if (!v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_vp_index) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) function do look up vport by index is NULL", + v_lport->port_id); + + return NULL; + } + + return v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_vp_index( + v_lport, v_vp_index); +} + +struct unf_lport_s *unf_cm_lookup_vport_by_did(struct unf_lport_s *v_lport, + unsigned int v_did) +{ + UNF_CHECK_VALID(0x1821, UNF_TRUE, v_lport, return NULL); + + if (!v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_did) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) function do look up vport by D_ID is NULL", + v_lport->port_id); + + return NULL; + } + + return v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_did(v_lport, + v_did); +} + +struct unf_lport_s *unf_cm_lookup_vport_by_wwpn(struct unf_lport_s *v_lport, + unsigned long long v_wwpn) +{ + UNF_CHECK_VALID(0x1822, UNF_TRUE, v_lport, return NULL); + + if (!v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_wwpn) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) function do look up vport by WWPN is NULL", + v_lport->port_id); + + return NULL; + } + + return v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_wwpn(v_lport, + v_wwpn); +} + +void unf_cm_vport_remove(struct unf_lport_s *v_vport) +{ + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x1823, UNF_TRUE, v_vport, return); + lport = v_vport->root_lport; + UNF_CHECK_VALID(0x1824, UNF_TRUE, lport, return); + + if (!lport->lport_mgr_temp.pfn_unf_vport_remove) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) function do vport remove is NULL", + lport->port_id); + return; + } + + lport->lport_mgr_temp.pfn_unf_vport_remove(v_vport); +} diff --git a/drivers/scsi/huawei/hifc/unf_lport.h b/drivers/scsi/huawei/hifc/unf_lport.h new file mode 100644 index 000000000000..cb9105e12b51 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_lport.h @@ -0,0 +1,569 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef __UNF_LPORT_H +#define __UNF_LPORT_H +#include "unf_disc.h" +#include "unf_event.h" +#include "unf_common.h" + +#define UNF_PORT_TYPE_FC 0 +#define UNF_PORT_TYPE_DISC 1 +#define UNF_FW_UPDATE_PATH_LEN_MAX 255 +#define UNF_EXCHG_MGR_NUM (4) + +#define UNF_MAX_IO_RETURN_VALUE 0x12 +#define UNF_MAX_SCSI_CMD 0xFF + +enum unf_scsi_error_handle_type { + UNF_SCSI_ABORT_IO_TYPE = 0, + UNF_SCSI_DEVICE_RESET_TYPE, + UNF_SCSI_TARGET_RESET_TYPE, + UNF_SCSI_BUS_RESET_TYPE, + UNF_SCSI_HOST_RESET_TYPE, + UNF_SCSI_VIRTUAL_RESET_TYPE, + UNF_SCSI_ERROR_HANDLE_BUTT +}; + 
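+/* + * L_Port teardown progress is recorded step by step in the destroy_step + * field of struct unf_lport_s using the values below (see, for example, + * unf_destroy_lport_route() and unf_release_lport_mgr_temp()). + */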
+enum unf_lport_destroy_step_e { + UNF_LPORT_DESTROY_STEP_0_SET_REMOVING = 0, + UNF_LPORT_DESTROY_STEP_1_REPORT_PORT_OUT, + UNF_LPORT_DESTROY_STEP_2_CLOSE_ROUTE, + UNF_LPORT_DESTROY_STEP_3_DESTROY_EVENT_CENTER, + UNF_LPORT_DESTROY_STEP_4_DESTROY_EXCH_MGR, + UNF_LPORT_DESTROY_STEP_5_DESTROY_ESGL_POOL, + UNF_LPORT_DESTROY_STEP_6_DESTROY_DISC_MGR, + UNF_LPORT_DESTROY_STEP_7_DESTROY_XCHG_MGR_TMP, + UNF_LPORT_DESTROY_STEP_8_DESTROY_RPORT_MG_TMP, + UNF_LPORT_DESTROY_STEP_9_DESTROY_LPORT_MG_TMP, + UNF_LPORT_DESTROY_STEP_10_DESTROY_SCSI_TABLE, + UNF_LPORT_DESTROY_STEP_11_UNREG_TGT_HOST, + UNF_LPORT_DESTROY_STEP_12_UNREG_SCSI_HOST, + UNF_LPORT_DESTROY_STEP_13_DESTROY_LW_INTERFACE, + UNF_LPORT_DESTROY_STEP_BUTT +}; + +enum unf_lport_enhanced_feature_e { + /* Enhanced GFF: connect even if getting the GFF feature fails */ + UNF_LPORT_ENHANCED_FEATURE_ENHANCED_GFF = 0x0001, + /* Enhanced IO balance */ + UNF_LPORT_ENHANCED_FEATURE_IO_TRANSFERLIST = 0x0002, + /* Enhanced IO check */ + UNF_LPORT_ENHANCED_FEATURE_IO_CHECKPOINT = 0x0004, + /* Close FW ROUTE */ + UNF_LPORT_ENHANCED_FEATURE_CLOSE_FW_ROUTE = 0x0008, + /* Read SFP information at the lowest frequency */ + UNF_LPORT_ENHANCED_FEATURE_READ_SFP_ONCE = 0x0010, + UNF_LPORT_ENHANCED_FEATURE_BUTT +}; + +enum unf_lport_login_state_e { + UNF_LPORT_ST_ONLINE = 0x2000, /* uninitialized */ + UNF_LPORT_ST_INITIAL, /* initialized and LinkDown */ + UNF_LPORT_ST_LINK_UP, /* initialized and Link UP */ + UNF_LPORT_ST_FLOGI_WAIT, /* waiting for FLOGI completion */ + UNF_LPORT_ST_PLOGI_WAIT, /* waiting for PLOGI completion */ + UNF_LPORT_ST_RNN_ID_WAIT, /* waiting for RNN_ID completion */ + UNF_LPORT_ST_RSNN_NN_WAIT, /* waiting for RSNN_NN completion */ + UNF_LPORT_ST_RSPN_ID_WAIT, /* waiting for RSPN_ID completion */ + UNF_LPORT_ST_RPN_ID_WAIT, /* waiting for RPN_ID completion */ + UNF_LPORT_ST_RFT_ID_WAIT, /* waiting for RFT_ID completion */ + UNF_LPORT_ST_RFF_ID_WAIT, /* waiting for RFF_ID completion */ + UNF_LPORT_ST_SCR_WAIT, /* waiting for SCR completion */ + UNF_LPORT_ST_READY, /* ready for use */ + UNF_LPORT_ST_LOGO, /* waiting for LOGO completion */ + UNF_LPORT_ST_RESET, /* being reset and will restart */ + UNF_LPORT_ST_OFFLINE, /* offline */ + UNF_LPORT_ST_BUTT +}; + +enum unf_lport_event_e { + UNF_EVENT_LPORT_NORMAL_ENTER = 0x8000, /* next state enter */ + UNF_EVENT_LPORT_ONLINE = 0x8001, /* LPort online */ + UNF_EVENT_LPORT_LINK_UP = 0x8002, /* LPort link up */ + UNF_EVENT_LPORT_LINK_DOWN = 0x8003, /* LPort link down */ + UNF_EVENT_LPORT_OFFLINE = 0x8004, /* LPort being stopped */ + UNF_EVENT_LPORT_RESET = 0x8005, + UNF_EVENT_LPORT_REMOTE_ACC = 0x8006, /* next state enter */ + UNF_EVENT_LPORT_REMOTE_RJT = 0x8007, /* rport reject */ + UNF_EVENT_LPORT_REMOTE_TIMEOUT = 0x8008, /* rport timeout */ + UNF_EVENT_LPORT_READY = 0x8009, + UNF_EVENT_LPORT_REMOTE_BUTT +}; + +struct unf_cm_disc_mg_template_s { + /* start, input: L_Port, return: ok/fail */ + unsigned int (*pfn_unf_disc_start)(void *v_lport); + + /* stop, input: L_Port, return: ok/fail */ + unsigned int (*pfn_unf_disc_stop)(void *v_lport); + + /* Callback invoked after disc completes, with event ok/fail.
*/ + void (*pfn_unf_disc_callback)(void *v_lport, unsigned int v_result); +}; + +struct unf_chip_manage_info_s { + struct list_head list_chip_thread_entry; + struct list_head list_head; + spinlock_t chip_event_list_lock; + struct task_struct *data_thread; + unsigned int list_num; + unsigned int slot_id; + unsigned char chip_id; + unsigned char rsv; + unsigned char sfp_9545_fault; /* 9545 fault */ + unsigned char sfp_power_fault; /* SFP power fault */ + atomic_t ref_cnt; + unsigned int b_thread_exit; + struct unf_chip_info_s chip_info; + atomic_t card_loop_test_flag; + spinlock_t card_loop_back_state_lock; + char update_path[UNF_FW_UPDATE_PATH_LEN_MAX]; +}; + +enum unf_timer_type_e { + UNF_TIMER_TYPE_INI_IO, + UNF_TIMER_TYPE_REQ_IO, + UNF_TIMER_TYPE_INI_RRQ, + UNF_TIMER_TYPE_SFS, + UNF_TIMER_TYPE_INI_ABTS +}; + +struct unf_cm_xchg_mgr_template_s { + /* Get new Xchg */ + /* input:L_Port,ini/tgt type,return:initialized Xchg */ + void *(*pfn_unf_xchg_get_free_and_init)(void *, unsigned int, + unsigned short); + + /* OXID,SID lookup Xchg */ + /* input: L_Port,OXID,SID,return:Xchg */ + void *(*pfn_unf_look_up_xchg_by_id)(void *, unsigned short, + unsigned int); + + /* input:L_Port,tag,return:Xchg */ + void *(*pfn_unf_look_up_xchg_by_tag)(void *, unsigned short); + + /* free Xchg */ + /* input:L_Port,Xchg,return:void */ + void (*pfn_unf_xchg_release)(void *, void *); + + /* Abort IO Xchg by SID/DID */ + /* input:L_Port,SID,DID,return:void */ + void (*pfn_unf_xchg_mgr_io_xchg_abort)(void *, void *, unsigned int, + unsigned int, unsigned int); + + /* Abort SFS Xchg by SID/DID */ + /* input:L_Port,SID,DID,return:void */ + void (*pfn_unf_xchg_mgr_sfs_xchg_abort)(void *, void *, + unsigned int, unsigned int); + + /* Clean Xchg by SID/DID */ + /* input:L_Port,SID,DID,return:void */ + void (*pfn_unf_xchg_mgr_xchg_clean)(void *, unsigned int, + unsigned int); + + /* Add Xchg timer */ + void (*pfn_unf_xchg_add_timer)(void *, unsigned long, + enum unf_timer_type_e); + + /* Cancel Xchg timer */ + void (*pfn_unf_xchg_cancel_timer)(void *); + + /* L_Port, Abort flag */ + void (*pfn_unf_xchg_abort_all_io)(void *, unsigned int, int); + + /* find Xchg by scsi Cmnd sn */ + void *(*pfn_unf_look_up_xchg_by_cmnd_sn)(void *, unsigned long long, + unsigned int); + /* input:L_Port,unsigned long long */ + void (*pfn_unf_xchg_abort_by_lun)(void *, void *, unsigned long long, + void *, int); + + void (*pfn_unf_xchg_abort_by_session)(void *, void *); + +}; + +struct unf_rport_pool_s { + unsigned int rport_pool_count; + void *rport_pool_add; + struct list_head list_rports_pool; + spinlock_t rport_free_pool_lock; + /* for synchronous reuse RPort POOL completion */ + struct completion *rport_pool_completion; + unsigned long *pul_rpi_bitmap; +}; + +struct unf_cm_lport_template_s { + /* Get VPort struct and init */ + /* input:pstLport,ini/tgt type,return:pstVport */ + void *(*pfn_unf_vport_get_free_and_init)(void *, unsigned int); + + /* For fast IO path */ + /* input: pstLport, VpIndex, return:pstVport */ + void *(*pfn_unf_lookup_vport_by_vp_index)(void *, unsigned short); + + /* input: pstLport, PortId,return:pstVport */ + void *(*pfn_unf_lookup_vport_by_port_id)(void *, unsigned int); + + /* input:pstLport, wwpn, return:pstVport */ + void *(*pfn_unf_lookup_vport_by_wwpn)(void *, unsigned long long); + + /* input:L_Port, DID, return:pstVport */ + void *(*pfn_unf_lookup_vport_by_did)(void *, unsigned int); + + /* input:L_Port,return:void */ + void (*pfn_unf_vport_remove)(void *); + +}; + +struct unf_vport_pool_s { + unsigned 
short vport_pool_count; + void *vport_pool_addr; + struct list_head list_vport_pool; + spinlock_t vport_pool_lock; + struct completion *vport_pool_completion; + unsigned short slab_next_index; /* Next free vport */ + unsigned short slab_total_sum; /* Total Vport num */ + struct unf_lport_s *vport_slab[0]; +}; + +struct unf_esgl_pool_s { + unsigned int esgl_pool_count; + void *esgl_pool_addr; + struct list_head list_esgl_pool; + spinlock_t esgl_pool_lock; + struct buf_describe_s esgl_buf_list; +}; + +/* little endian */ +struct unf_port_id_page_s { + struct list_head list_node_rscn; + unsigned char port_id_port; + unsigned char port_id_area; + unsigned char port_id_domain; + + unsigned char uc_addr_format : 2; + unsigned char uc_event_qualifier : 4; + unsigned char uc_reserved : 2; +}; + +struct unf_rscn_mg_s { + spinlock_t rscn_id_list_lock; + unsigned int free_rscn_count; + + /* free RSCN page list */ + struct list_head list_free_rscn_page; + + /* using RSCN page list */ + struct list_head list_using_rscn_page; + + /* All RSCN PAGE Address */ + void *rscn_pool_add; + struct unf_port_id_page_s *(*pfn_unf_get_free_rscn_node)( + void *v_rscn_mg); + void (*pfn_unf_release_rscn_node)(void *v_rscn_mg, void *v_rscn_node); +}; + +struct unf_disc_rport_mg_s { + void *disc_pool_add; + struct list_head list_disc_rports_pool; /* discovery DISC Rport pool */ + struct list_head list_disc_rport_busy; /* Busy discovery DiscRport */ +}; + +struct unf_disc_manage_info_s { + struct list_head list_head; + spinlock_t disc_event_list_lock; + atomic_t disc_contrl_size; + + unsigned int b_thread_exit; + struct task_struct *data_thread; + +}; + +struct unf_disc_s { + unsigned int retry_count; /* current retry counter */ + unsigned int max_retry_count; /* maximum retry counter */ + unsigned int disc_flag; /* Disc flag: Loop Disc, Fabric Disc */ + + struct completion *disc_completion; + atomic_t disc_ref_cnt; + + struct list_head list_busy_rports; /* Busy RPort list */ + struct list_head list_delete_rports; /* Delete RPort list */ + struct list_head list_destroy_rports; + + spinlock_t rport_busy_pool_lock; + + struct unf_lport_s *lport; + enum unf_disc_state_e en_states; + struct delayed_work disc_work; + + /* Disc operation template */ + struct unf_cm_disc_mg_template_s unf_disc_temp; + + /* UNF_INIT_DISC/UNF_RSCN_DISC */ + unsigned int disc_option; + + /* RSCN list */ + struct unf_rscn_mg_s rscn_mgr; + struct unf_disc_rport_mg_s disc_rport_mgr; + struct unf_disc_manage_info_s disc_thread_info; + + unsigned long long last_disc_jiff; +}; + +enum unf_service_item_e { + UNF_SERVICE_ITEM_FLOGI = 0, + UNF_SERVICE_ITEM_PLOGI, + UNF_SERVICE_ITEM_PRLI, + UNF_SERVICE_ITEM_RSCN, + UNF_SERVICE_ITEM_ABTS, + UNF_SERVICE_ITEM_PDISC, + UNF_SERVICE_ITEM_ADISC, + UNF_SERVICE_ITEM_LOGO, + UNF_SERVICE_ITEM_SRR, + UNF_SERVICE_ITEM_RRQ, + UNF_SERVICE_ITEM_ECHO, + UNF_SERVICE_ITEM_RLS, + UNF_SERVICE_BUTT +}; + +/* Link service counter */ +struct unf_link_service_collect_s { + unsigned long long service_cnt[UNF_SERVICE_BUTT]; +}; + +struct unf_pcie_error_count_s { + unsigned int pcie_error_count[UNF_PCIE_BUTT]; +}; + +#define INVALID_WWPN 0 + +enum unf_device_scsi_state_e { + UNF_SCSI_ST_INIT = 0, + UNF_SCSI_ST_OFFLINE, + UNF_SCSI_ST_ONLINE, + UNF_SCSI_ST_DEAD, + UNF_SCSI_ST_BUTT +}; + +struct unf_wwpn_dfx_counter_info_s { + atomic64_t io_done_cnt[UNF_MAX_IO_RETURN_VALUE]; + atomic64_t scsi_cmd_cnt[UNF_MAX_SCSI_CMD]; + atomic64_t target_busy; + atomic64_t host_busy; + atomic_t error_handle[UNF_SCSI_ERROR_HANDLE_BUTT]; + atomic_t
error_handle_result[UNF_SCSI_ERROR_HANDLE_BUTT]; + atomic_t device_alloc; + atomic_t device_destroy; +}; + +#define UNF_MAX_LUN_PER_TARGET 256 +struct unf_wwpn_rport_info_s { + unsigned long long wwpn; + struct unf_rport_s *rport; /* Rport which linkup */ + void *lport; /* Lport */ + unsigned int target_id; /* target_id distribute by scsi */ + unsigned int last_en_scis_state; + atomic_t en_scsi_state; + struct unf_wwpn_dfx_counter_info_s *dfx_counter; + struct delayed_work loss_tmo_work; + int b_need_scan; + struct list_head fc_lun_list; +}; + +struct unf_rport_scsi_id_image_s { + spinlock_t scsi_image_table_lock; + /* ScsiId Wwpn table */ + struct unf_wwpn_rport_info_s *wwn_rport_info_table; + unsigned int max_scsi_id; +}; + +enum unf_lport_dirty_flag_e { + UNF_LPORT_DIRTY_FLAG_NONE = 0, + UNF_LPORT_DIRTY_FLAG_XCHGMGR_DIRTY = 0x100, + UNF_LPORT_DIRTY_FLAG_RPORT_POOL_DIRTY = 0x200, + UNF_LPORT_DIRTY_FLAG_DISC_DIRTY = 0x400, + UNF_LPORT_DIRTY_FLAG_BUTT +}; + +typedef struct unf_rport_s *(*pfn_unf_rport_set_qualifier)( + struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport_by_nport_id, + struct unf_rport_s *v_rport_by_wwpn, + unsigned long long v_wwpn, + unsigned int v_sid); +typedef unsigned int (*pfn_unf_tmf_status_recovery)(void *v_rport, + void *v_xchg); + +enum unf_start_work_state_e { + UNF_START_WORK_STOP, + UNF_START_WORK_BEGIN, + UNF_START_WORK_COMPLETE +}; + +struct unf_ini_private_info_s { + unsigned int driver_type; /* Driver Type */ + void *lower; /* driver private pointer */ +}; + +struct unf_product_hosts_info_s { + void *p_tgt_host; + unf_scsi_host_s *p_scsi_host; + struct unf_ini_private_info_s drv_private_info; + unf_scsi_host_s scsi_host; + +}; + +struct unf_lport_s { + unsigned int port_type; /* Port Type: fc */ + atomic_t lport_ref_cnt; /* LPort reference counter */ + void *fc_port; /* hard adapter hba pointer */ + void *rport; /* Used for SCSI interface */ + void *vport; + + struct unf_product_hosts_info_s host_info; /* scsi host mg */ + struct unf_rport_scsi_id_image_s rport_scsi_table; + int b_port_removing; + + int b_port_dir_exchange; + + spinlock_t xchg_mgr_lock; + struct list_head list_xchg_mgr_head; + struct list_head list_dirty_xchg_mgr_head; + void *p_xchg_mgr[UNF_EXCHG_MGR_NUM]; + enum int_e b_priority; + struct list_head list_vports_head; /* Vport Mg */ + struct list_head list_intergrad_vports; /* Vport intergrad list */ + struct list_head list_destroy_vports; /* Vport destroy list */ + /* VPort entry, hook in list_vports_head */ + struct list_head entry_vport; + struct list_head entry_lport; /* LPort entry */ + spinlock_t lport_state_lock; /* UL Port Lock */ + struct unf_disc_s disc; /* Disc and rport Mg */ + /* rport pool,Vport share Lport pool */ + struct unf_rport_pool_s rport_pool; + struct unf_esgl_pool_s esgl_pool; /* external sgl pool */ + unsigned int port_id; /* Port Management ,0x11000 etc. 
*/ + enum unf_lport_login_state_e en_states; + unsigned int link_up; + unsigned int speed; + + unsigned long long node_name; + unsigned long long port_name; + unsigned long long fabric_node_name; + unsigned int nport_id; + unsigned int max_frame_size; + unsigned int ed_tov; + unsigned int ra_tov; + unsigned int rr_tov; + + unsigned int options; /* ini or tgt */ + unsigned int retries; + unsigned int max_retry_count; + + enum unf_act_topo_e en_act_topo; + enum int_e b_switch_state; /* 1---->ON,FALSE---->OFF */ + enum int_e b_bbscn_support; /* 1---->ON,FALSE---->OFF */ + + enum unf_start_work_state_e en_start_work_state; + + /* Xchg Mg operation template */ + struct unf_cm_xchg_mgr_template_s xchg_mgr_temp; + struct unf_cm_lport_template_s lport_mgr_temp; + struct unf_low_level_function_op_s low_level_func; + struct unf_event_mgr event_mgr; /* Disc and rport Mg */ + struct delayed_work retry_work; /* poll work or delay work */ + + struct workqueue_struct *link_event_wq; + struct workqueue_struct *xchg_wq; + + struct unf_err_code_s err_code_sum; /* Error code counter */ + struct unf_link_service_collect_s link_service_info; + struct unf_pcie_error_count_s pcie_error_cnt; + pfn_unf_rport_set_qualifier pfn_unf_qualify_rport; /* Qualify Rport */ + /* tmf marker recovery */ + pfn_unf_tmf_status_recovery pfn_unf_tmf_abnormal_recovery; + struct delayed_work route_timer_work; /* L_Port timer route */ + + unsigned short vp_index; /* Vport Index, Lport:0 */ + struct unf_vport_pool_s *vport_pool; /* Only for Lport */ + + void *root_lport; /* Point to physic Lport */ + struct completion *lport_free_completion; /* Free LPort Completion */ + +#define UNF_LPORT_NOP 1 +#define UNF_LPORT_NORMAL 0 + + atomic_t port_no_operater_flag; + + unsigned int enhanced_features; /* Enhanced Features */ + + unsigned int destroy_step; + unsigned int dirty_flag; + + struct unf_lport_sfp_info sfp_info; + struct unf_chip_manage_info_s *chip_info; + +#define UNF_LOOP_BACK_TESTING 1 +#define UNF_LOOP_BACK_TEST_END 0 + + unsigned char sfp_power_fault_count; + unsigned char sfp_9545_fault_count; + unsigned long long last_tx_fault_jif; /* SFP last tx fault jiffies */ + + /* Server card: UNF_FC_SERVER_BOARD_32_G(6)for 32G mode, + * UNF_FC_SERVER_BOARD_16_G(7)for 16G mode + */ + unsigned int card_type; + atomic_t scsi_session_add_success; + atomic_t scsi_session_add_failed; + atomic_t scsi_session_del_success; + atomic_t scsi_session_del_failed; + atomic_t add_start_work_failed; + atomic_t add_closing_work_failed; + atomic_t device_alloc; + atomic_t device_destroy; + atomic_t session_loss_tmo; + atomic_t alloc_scsi_id; + atomic_t resume_scsi_id; + atomic_t reuse_scsi_id; + atomic64_t last_exchg_mgr_idx; + atomic64_t exchg_index; + + unsigned int pcie_link_down_cnt; + int b_pcie_linkdown; + unsigned char fw_version[HIFC_VER_LEN]; + + atomic_t link_lose_tmo; + atomic_t err_code_obtain_freq; +}; + +void unf_lport_stat_ma(struct unf_lport_s *v_lport, + enum unf_lport_event_e v_event); +void unf_lport_error_recovery(struct unf_lport_s *v_lport); +void unf_set_lport_state(struct unf_lport_s *v_lport, + enum unf_lport_login_state_e v_states); +void unf_init_portparms(struct unf_lport_s *v_lport); +unsigned int unf_lport_enter_flogi(struct unf_lport_s *v_lport); +void unf_lport_enter_sns_plogi(struct unf_lport_s *v_lport); +unsigned int unf_init_disc_mgr(struct unf_lport_s *v_pst_lport); +unsigned int unf_init_lport_route(struct unf_lport_s *v_lport); +void unf_destroy_lport_route(struct unf_lport_s *v_lport); +void 
unf_reset_lport_params(struct unf_lport_s *v_lport); +void unf_cmmark_dirty_mem(struct unf_lport_s *v_lport, + enum unf_lport_dirty_flag_e v_etype); + +struct unf_lport_s *unf_cm_lookup_vport_by_vp_index(struct unf_lport_s *v_lport, + unsigned short v_vp_index); +struct unf_lport_s *unf_cm_lookup_vport_by_did(struct unf_lport_s *v_lport, + unsigned int v_did); +struct unf_lport_s *unf_cm_lookup_vport_by_wwpn(struct unf_lport_s *v_lport, + unsigned long long v_wwpn); +void unf_cm_vport_remove(struct unf_lport_s *v_vport); + +#endif + diff --git a/drivers/scsi/huawei/hifc/unf_npiv.c b/drivers/scsi/huawei/hifc/unf_npiv.c new file mode 100644 index 000000000000..1c3e3e99272e --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_npiv.c @@ -0,0 +1,1481 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#include "unf_log.h" +#include "unf_common.h" +#include "unf_lport.h" +#include "unf_rport.h" +#include "unf_exchg.h" +#include "unf_service.h" +#include "unf_portman.h" +#include "unf_rport.h" +#include "unf_io.h" +#include "unf_npiv.h" + +/* Note: + * The functions related to resource allocation for a vport are shared with the lport, + * and the root lport is passed as the parameter to these functions, including: + * stEsglPool; + * event_mgr; + * stRportPool + * ExchMgr + */ + +#define UNF_DELETE_VPORT_MAX_WAIT_TIME_MS 60000 + +unsigned int unf_init_vport_pool(struct unf_lport_s *v_lport) +{ + unsigned int ret = RETURN_OK; + unsigned int i = 0; + unsigned short vport_cnt = 0; + struct unf_lport_s *vport = NULL; + struct unf_vport_pool_s *vport_pool; + unsigned int vport_pool_size = 0; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x1950, UNF_TRUE, v_lport, return RETURN_ERROR); + + UNF_TOU16_CHECK(vport_cnt, v_lport->low_level_func.support_max_npiv_num, + return RETURN_ERROR); + if (vport_cnt == 0) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Port(0x%x) does not support NPIV", + v_lport->port_id); + + return RETURN_OK; + } + + vport_pool_size = sizeof(struct unf_vport_pool_s) + + sizeof(struct unf_lport_s *) * vport_cnt; + v_lport->vport_pool = vmalloc(vport_pool_size); + if (!v_lport->vport_pool) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) cannot allocate vport pool", + v_lport->port_id); + + return RETURN_ERROR; + } + memset(v_lport->vport_pool, 0, vport_pool_size); + vport_pool = v_lport->vport_pool; + vport_pool->vport_pool_count = vport_cnt; + vport_pool->vport_pool_completion = NULL; + spin_lock_init(&vport_pool->vport_pool_lock); + INIT_LIST_HEAD(&vport_pool->list_vport_pool); + + vport_pool->vport_pool_addr = vmalloc( + (size_t)(vport_cnt * sizeof(struct unf_lport_s))); + if (!vport_pool->vport_pool_addr) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) cannot allocate vport pool address", + v_lport->port_id); + vfree(v_lport->vport_pool); + v_lport->vport_pool = NULL; + + return RETURN_ERROR; + } + + memset(vport_pool->vport_pool_addr, 0, vport_cnt * + sizeof(struct 
unf_lport_s)); + vport = (struct unf_lport_s *)vport_pool->vport_pool_addr; + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + for (i = 0; i < vport_cnt; i++) { + list_add_tail(&vport->entry_vport, + &vport_pool->list_vport_pool); + vport++; + } + + vport_pool->slab_next_index = 0; + vport_pool->slab_total_sum = vport_cnt; + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); + + return ret; +} + +void unf_free_vport_pool(struct unf_lport_s *v_lport) +{ + struct unf_vport_pool_s *vport_pool = NULL; + int wait = UNF_FALSE; + unsigned long flag = 0; + unsigned int remain = 0; + struct completion vport_pool_completion = + COMPLETION_INITIALIZER(vport_pool_completion); + + UNF_CHECK_VALID(0x1951, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x1952, UNF_TRUE, v_lport->vport_pool, return); + vport_pool = v_lport->vport_pool; + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); + + if (vport_pool->slab_total_sum != vport_pool->vport_pool_count) { + vport_pool->vport_pool_completion = &vport_pool_completion; + remain = vport_pool->slab_total_sum - + vport_pool->vport_pool_count; + wait = UNF_TRUE; + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); + + if (wait == UNF_TRUE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) begin to wait for vport pool completion(%ld) remain(%d)", + v_lport->port_id, jiffies, remain); + + wait_for_completion(vport_pool->vport_pool_completion); + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) wait for vport pool completion end(%ld)", + v_lport->port_id, jiffies); + spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); + vport_pool->vport_pool_completion = NULL; + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); + } + + if (v_lport->vport_pool->vport_pool_addr) { + vfree(v_lport->vport_pool->vport_pool_addr); + v_lport->vport_pool->vport_pool_addr = NULL; + } + vfree(v_lport->vport_pool); + v_lport->vport_pool = NULL; + + UNF_REFERNCE_VAR(remain); +} + +static inline struct unf_lport_s *unf_get_vport_by_slab_index( + struct unf_vport_pool_s *v_vport_pool, + unsigned short v_slab_index) +{ + UNF_CHECK_VALID(0x1953, UNF_TRUE, v_vport_pool, return NULL); + + return v_vport_pool->vport_slab[v_slab_index]; +} + +static inline void unf_vport_pool_slab_set( + struct unf_vport_pool_s *v_vport_pool, + unsigned short v_slab_index, + struct unf_lport_s *v_vport) +{ + UNF_CHECK_VALID(0x1954, UNF_TRUE, v_vport_pool, return); + + v_vport_pool->vport_slab[v_slab_index] = v_vport; +} + +unsigned int unf_alloc_vp_index(struct unf_vport_pool_s *v_vport_pool, + struct unf_lport_s *v_vport, + unsigned short v_vpid) +{ + unsigned short slab_index = 0; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x1955, UNF_TRUE, v_vport_pool, return RETURN_ERROR); + UNF_CHECK_VALID(0x1956, UNF_TRUE, v_vport, return RETURN_ERROR); + + spin_lock_irqsave(&v_vport_pool->vport_pool_lock, flags); + if (v_vpid == 0) { + slab_index = v_vport_pool->slab_next_index; + while (unf_get_vport_by_slab_index(v_vport_pool, slab_index)) { + slab_index = (slab_index + 1) % + v_vport_pool->slab_total_sum; + + if (slab_index == v_vport_pool->slab_next_index) { + spin_unlock_irqrestore( + &v_vport_pool->vport_pool_lock, flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_REG_ATT, UNF_WARN, + "[warn]VPort pool has no slab "); + + return RETURN_ERROR; + } + } + } else { + slab_index = v_vpid - 1; + if (unf_get_vport_by_slab_index(v_vport_pool, slab_index)) { + 
spin_unlock_irqrestore(&v_vport_pool->vport_pool_lock, + flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[warn]VPort Index(0x%x) is occupied", v_vpid); + + return RETURN_ERROR; + } + } + + unf_vport_pool_slab_set(v_vport_pool, slab_index, v_vport); + + v_vport_pool->slab_next_index = (slab_index + 1) % + v_vport_pool->slab_total_sum; + + spin_unlock_irqrestore(&v_vport_pool->vport_pool_lock, flags); + + spin_lock_irqsave(&v_vport->lport_state_lock, flags); + v_vport->vp_index = slab_index + 1; /* VpIndex=SlabIndex+1 */ + spin_unlock_irqrestore(&v_vport->lport_state_lock, flags); + + return RETURN_OK; +} + +void unf_free_vp_index(struct unf_vport_pool_s *v_vport_pool, + struct unf_lport_s *v_vport) +{ + unsigned long flags = 0; + + UNF_CHECK_VALID(0x1957, UNF_TRUE, v_vport_pool, return); + UNF_CHECK_VALID(0x1958, UNF_TRUE, v_vport, return); + + if ((v_vport->vp_index == 0) || + (v_vport->vp_index > v_vport_pool->slab_total_sum)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "Input vport index(0x%x) is beyond the normal range, min(0x1), max(0x%x).", + v_vport->vp_index, v_vport_pool->slab_total_sum); + return; + } + + spin_lock_irqsave(&v_vport_pool->vport_pool_lock, flags); + /* SlabIndex=VpIndex-1 */ + unf_vport_pool_slab_set(v_vport_pool, v_vport->vp_index - 1, NULL); + spin_unlock_irqrestore(&v_vport_pool->vport_pool_lock, flags); + + spin_lock_irqsave(&v_vport->lport_state_lock, flags); + v_vport->vp_index = INVALID_VALUE16; + spin_unlock_irqrestore(&v_vport->lport_state_lock, flags); +} + +struct unf_lport_s *unf_get_free_vport(struct unf_lport_s *v_lport) +{ + struct unf_lport_s *vport = NULL; + struct list_head *list_head = NULL; + struct unf_vport_pool_s *vport_pool; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x1959, 1, v_lport, return NULL); + UNF_CHECK_VALID(0x1960, UNF_TRUE, v_lport->vport_pool, return NULL); + + vport_pool = v_lport->vport_pool; + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); + if (!list_empty(&vport_pool->list_vport_pool)) { + list_head = (&vport_pool->list_vport_pool)->next; + list_del(list_head); + vport_pool->vport_pool_count--; + list_add_tail(list_head, &v_lport->list_vports_head); + vport = list_entry(list_head, struct unf_lport_s, entry_vport); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]LPort(0x%x)'s vport pool is empty", + v_lport->port_id); + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); + + return NULL; + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); + + return vport; +} + +void unf_vport_back_to_pool(void *v_vport) +{ + struct unf_lport_s *lport = NULL; + struct unf_lport_s *vport = NULL; + struct list_head *list = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x1961, UNF_TRUE, v_vport, return); + vport = v_vport; + lport = (struct unf_lport_s *)(vport->root_lport); + UNF_CHECK_VALID(0x1962, UNF_TRUE, lport, return); + UNF_CHECK_VALID(0x1963, UNF_TRUE, lport->vport_pool, return); + + unf_free_vp_index(lport->vport_pool, vport); + + spin_lock_irqsave(&lport->vport_pool->vport_pool_lock, flag); + + list = &vport->entry_vport; + list_del(list); + list_add_tail(list, &lport->vport_pool->list_vport_pool); + lport->vport_pool->vport_pool_count++; + + spin_unlock_irqrestore(&lport->vport_pool->vport_pool_lock, flag); +} + +void unf_init_vport_from_lport(struct unf_lport_s *v_vport, + struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x1964, UNF_TRUE, v_vport, return); + UNF_CHECK_VALID(0x1965, UNF_TRUE, v_lport, 
return); + + v_vport->port_type = v_lport->port_type; + v_vport->fc_port = v_lport->fc_port; + v_vport->en_act_topo = v_lport->en_act_topo; + v_vport->root_lport = v_lport; + v_vport->pfn_unf_qualify_rport = v_lport->pfn_unf_qualify_rport; + v_vport->link_event_wq = v_lport->link_event_wq; + v_vport->xchg_wq = v_lport->xchg_wq; + + memcpy(&v_vport->xchg_mgr_temp, &v_lport->xchg_mgr_temp, + sizeof(struct unf_cm_xchg_mgr_template_s)); + + memcpy(&v_vport->event_mgr, &v_lport->event_mgr, + sizeof(struct unf_event_mgr)); + + memset(&v_vport->lport_mgr_temp, 0, + sizeof(struct unf_cm_lport_template_s)); + + memcpy(&v_vport->low_level_func, &v_lport->low_level_func, + sizeof(struct unf_low_level_function_op_s)); +} + +void unf_check_vport_pool_status(struct unf_lport_s *v_lport) +{ + struct unf_vport_pool_s *vport_pool = NULL; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x1968, UNF_TRUE, v_lport, return); + vport_pool = v_lport->vport_pool; + UNF_CHECK_VALID(0x1969, UNF_TRUE, vport_pool, return); + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + + if ((vport_pool->vport_pool_completion) && + (vport_pool->slab_total_sum == vport_pool->vport_pool_count)) + complete(vport_pool->vport_pool_completion); + + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); +} + +void unf_vport_fabric_logo(struct unf_lport_s *v_vport) +{ + struct unf_rport_s *rport = NULL; + + rport = unf_get_rport_by_nport_id(v_vport, UNF_FC_FID_FLOGI); + UNF_CHECK_VALID(0x1970, UNF_TRUE, rport, return); + (void)unf_send_logo(v_vport, rport); +} + +void unf_vport_deinit(void *v_vport) +{ + struct unf_lport_s *vport = NULL; + + UNF_CHECK_VALID(0x1971, UNF_TRUE, v_vport, return); + vport = (struct unf_lport_s *)v_vport; + + unf_unregister_scsi_host(vport); + + unf_disc_mgr_destroy(vport); + + unf_release_xchg_mgr_temp(vport); + + unf_release_lport_mgr_temp(vport); + + unf_destroy_scsi_id_table(vport); + + unf_lport_release_lw_fun_op(vport); + vport->fc_port = NULL; + vport->vport = NULL; + + if (vport->lport_free_completion) { + complete(vport->lport_free_completion); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]VPort(0x%x) point(0x%p) completion free function is NULL", + vport->port_id, vport); + dump_stack(); + } +} + +void unf_vport_ref_dec(struct unf_lport_s *v_vport) +{ + UNF_CHECK_VALID(0x1972, UNF_TRUE, v_vport, return); + + if (atomic_dec_and_test(&v_vport->lport_ref_cnt)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]VPort(0x%x) point(0x%p) reference count is 0 and freevport", + v_vport->port_id, v_vport); + + unf_vport_deinit(v_vport); + } +} + +unsigned int unf_vport_init(void *v_vport) +{ + struct unf_lport_s *vport = NULL; + + UNF_CHECK_VALID(0x1974, UNF_TRUE, v_vport, return RETURN_ERROR); + vport = (struct unf_lport_s *)v_vport; + + vport->options = UNF_PORT_MODE_INI; + vport->nport_id = 0; + + if (unf_init_scsi_id_table(vport) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Vport(0x%x) can not initialize SCSI ID table", + vport->port_id); + + return RETURN_ERROR; + } + + if (unf_init_disc_mgr(vport) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Vport(0x%x) can not initialize discover manager", + vport->port_id); + unf_destroy_scsi_id_table(vport); + + return RETURN_ERROR; + } + + if (unf_register_scsi_host(vport) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Vport(0x%x) vport can not register SCSI host", + 
vport->port_id); + unf_disc_mgr_destroy(vport); + unf_destroy_scsi_id_table(vport); + + return RETURN_ERROR; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "[event]Vport(0x%x) Create succeed with wwpn(0x%llx)", + vport->port_id, vport->port_name); + + return RETURN_OK; +} + +void unf_vport_remove(void *v_vport) +{ + struct unf_lport_s *vport = NULL; + struct unf_lport_s *lport = NULL; + struct completion vport_free_completion = + COMPLETION_INITIALIZER(vport_free_completion); + + UNF_CHECK_VALID(0x1975, UNF_TRUE, v_vport, return); + vport = (struct unf_lport_s *)v_vport; + lport = (struct unf_lport_s *)(vport->root_lport); + vport->lport_free_completion = &vport_free_completion; + + unf_set_lport_removing(vport); + + unf_vport_ref_dec(vport); + + wait_for_completion(vport->lport_free_completion); + unf_vport_back_to_pool(vport); + + unf_check_vport_pool_status(lport); +} + +void *unf_lookup_vport_by_vp_index(void *v_lport, unsigned short v_vp_index) +{ + struct unf_lport_s *lport = NULL; + struct unf_vport_pool_s *vport_pool = NULL; + struct unf_lport_s *vport = NULL; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x1976, UNF_TRUE, v_lport, return NULL); + + lport = (struct unf_lport_s *)v_lport; + + vport_pool = lport->vport_pool; + if (unlikely(!vport_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) vport pool is NULL", + lport->port_id); + + return NULL; + } + + if ((v_vp_index == 0) || (v_vp_index > vport_pool->slab_total_sum)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) input vport index(0x%x) is beyond the normal range(0x1~0x%x)", + lport->port_id, v_vp_index, + vport_pool->slab_total_sum); + + return NULL; + } + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + /* SlabIndex=VpIndex-1 */ + vport = unf_get_vport_by_slab_index(vport_pool, v_vp_index - 1); + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); + + return (void *)vport; +} + +void *unf_lookup_vport_by_port_id(void *v_lport, unsigned int v_port_id) +{ + struct unf_lport_s *lport = NULL; + struct unf_vport_pool_s *vport_pool = NULL; + struct unf_lport_s *vport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x1977, UNF_TRUE, v_lport, return NULL); + + lport = (struct unf_lport_s *)v_lport; + vport_pool = lport->vport_pool; + if (unlikely(!vport_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) vport pool is NULL", + lport->port_id); + + return NULL; + } + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); + list_for_each_safe(node, next_node, &lport->list_vports_head) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + if (vport->port_id == v_port_id) { + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, + flag); + return vport; + } + } + + list_for_each_safe(node, next_node, &lport->list_intergrad_vports) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + if (vport->port_id == v_port_id) { + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, + flag); + return vport; + } + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) has no vport ID(0x%x).", + lport->port_id, v_port_id); + return NULL; +} + +void *unf_lookup_vport_by_did(void *v_lport, unsigned int v_did) +{ + struct unf_lport_s *lport = NULL; + struct unf_vport_pool_s *vport_pool = NULL; + struct 
unf_lport_s *vport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x1978, UNF_TRUE, v_lport, return NULL); + + lport = (struct unf_lport_s *)v_lport; + vport_pool = lport->vport_pool; + if (unlikely(!vport_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) vport pool is NULL", + lport->port_id); + + return NULL; + } + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); + list_for_each_safe(node, next_node, &lport->list_vports_head) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + if (vport->nport_id == v_did) { + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, + flag); + + return vport; + } + } + + list_for_each_safe(node, next_node, &lport->list_intergrad_vports) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + if (vport->nport_id == v_did) { + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, + flag); + + return vport; + } + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) has no vport Nport ID(0x%x)", + lport->port_id, v_did); + return NULL; +} + +void *unf_lookup_vport_by_wwpn(void *v_lport, unsigned long long v_wwpn) +{ + struct unf_lport_s *lport = NULL; + struct unf_vport_pool_s *vport_pool = NULL; + struct unf_lport_s *vport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x1979, UNF_TRUE, v_lport, return NULL); + + lport = (struct unf_lport_s *)v_lport; + vport_pool = lport->vport_pool; + if (unlikely(!vport_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) vport pool is NULL", + lport->port_id); + + return NULL; + } + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); + list_for_each_safe(node, next_node, &lport->list_vports_head) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + if (vport->port_name == v_wwpn) { + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, + flag); + + return vport; + } + } + + list_for_each_safe(node, next_node, &lport->list_intergrad_vports) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + if (vport->port_name == v_wwpn) { + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, + flag); + + return vport; + } + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) has no vport WWPN(0x%llx)", + lport->port_id, v_wwpn); + + return NULL; +} + +struct unf_lport_s *unf_alloc_vport(struct unf_lport_s *lport, + unsigned long long v_wwpn) +{ + struct unf_lport_s *vport = NULL; + + vport = unf_cm_lookup_vport_by_wwpn(lport, v_wwpn); + if (vport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Port(0x%x) has find vport with wwpn(0x%llx), can't create again", + lport->port_id, v_wwpn); + + return NULL; + } + + vport = unf_get_free_vport(lport); + if (!vport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Can not get free vport from pool"); + + return NULL; + } + vport->root_lport = lport; + vport->port_name = v_wwpn; + + unf_init_portparms(vport); + unf_init_vport_from_lport(vport, lport); + + if (unf_alloc_vp_index(lport->vport_pool, vport, 0) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Vport can not allocate vport index"); + unf_vport_back_to_pool(vport); + + return 
NULL; + } + vport->port_id = (((unsigned int)vport->vp_index) << + PORTID_VPINDEX_SHIT) | lport->port_id; + + return vport; +} + +unsigned int unf_npiv_conf(unsigned int v_port_id, unsigned long long v_wwpn) +{ +#define VPORT_WWN_MASK 0xff00ffffffffffff +#define VPORT_WWN_SHIFT 48 + + struct fc_vport_identifiers vid = { 0 }; + struct fc_vport *fc_port = NULL; + struct Scsi_Host *shost = NULL; + struct unf_lport_s *lport = NULL; + struct unf_lport_s *vport = NULL; + unsigned short vport_id = 0; + + lport = unf_find_lport_by_port_id(v_port_id); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Cannot find LPort by (0x%x).", v_port_id); + + return RETURN_ERROR; + } + + vport = unf_cm_lookup_vport_by_wwpn(lport, v_wwpn); + if (vport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Port(0x%x) has found a vport with wwpn(0x%llx), can't create it again", + lport->port_id, v_wwpn); + + return RETURN_ERROR; + } + + vport = unf_get_free_vport(lport); + if (!vport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Can not get free vport from pool"); + + return RETURN_ERROR; + } + + unf_init_portparms(vport); + unf_init_vport_from_lport(vport, lport); + + if ((lport->port_name & VPORT_WWN_MASK) == (v_wwpn & VPORT_WWN_MASK)) { + vport_id = (v_wwpn & ~VPORT_WWN_MASK) >> VPORT_WWN_SHIFT; + if (vport_id == 0) { + vport_id = (lport->port_name & ~VPORT_WWN_MASK) >> + VPORT_WWN_SHIFT; + } + } + + if (unf_alloc_vp_index(lport->vport_pool, vport, vport_id) != + RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Vport can not allocate vport index"); + unf_vport_back_to_pool(vport); + + return RETURN_ERROR; + } + + vport->port_id = (((unsigned int)vport->vp_index) << + PORTID_VPINDEX_SHIT) | lport->port_id; + + vid.roles = FC_PORT_ROLE_FCP_INITIATOR; + vid.vport_type = FC_PORTTYPE_NPIV; + vid.disable = false; + vid.node_name = lport->node_name; + + if (v_wwpn != 0) { + vid.port_name = v_wwpn; + } else { + if ((lport->port_name & ~VPORT_WWN_MASK) >> VPORT_WWN_SHIFT != + vport->vp_index) + vid.port_name = + (lport->port_name & VPORT_WWN_MASK) | + (((unsigned long long)vport->vp_index) << + VPORT_WWN_SHIFT); + else + vid.port_name = (lport->port_name & VPORT_WWN_MASK); + } + + vport->port_name = vid.port_name; + + shost = lport->host_info.p_scsi_host; + + fc_port = fc_vport_create(shost, 0, &vid); + if (!fc_port) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) failed to create vport with wwpn(0x%llx)", + lport->port_id, vid.port_name); + + unf_vport_back_to_pool(vport); + + return RETURN_ERROR; + } + + return RETURN_OK; +} + +struct unf_lport_s *unf_create_vport(struct unf_lport_s *v_lport, + struct vport_config_s *v_vport_config) +{ + unsigned int ret = RETURN_OK; + struct unf_lport_s *lport = NULL; + struct unf_lport_s *vport = NULL; + enum unf_act_topo_e lport_topo = UNF_ACT_TOP_UNKNOWN; + enum unf_lport_login_state_e lport_state = UNF_LPORT_ST_ONLINE; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x1983, UNF_TRUE, v_lport, return NULL); + UNF_CHECK_VALID(0x1983, UNF_TRUE, v_vport_config, return NULL); + + if (v_vport_config->port_mode != FC_PORT_ROLE_FCP_INITIATOR) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Only INITIATOR port mode(0x%x) is supported", + v_vport_config->port_mode); + + return NULL; + } + lport = v_lport; + + if (lport != lport->root_lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) not root 
port return", + lport->port_id); + + return NULL; + } + + vport = unf_cm_lookup_vport_by_wwpn(lport, v_vport_config->port_name); + if (!vport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Port(0x%x) can not find vport with wwpn(0x%llx)", + lport->port_id, v_vport_config->port_name); + + return NULL; + } + + ret = unf_vport_init(vport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]VPort(0x%x) can not initialze vport", + vport->port_id); + + return NULL; + } + + spin_lock_irqsave(&lport->lport_state_lock, flag); + lport_topo = lport->en_act_topo; + lport_state = lport->en_states; + v_vport_config->node_name = lport->node_name; + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + vport->port_name = v_vport_config->port_name; + vport->node_name = v_vport_config->node_name; + vport->nport_id = 0; + + /* only fabric topo support NPIV */ + if ((lport_topo == UNF_ACT_TOP_P2P_FABRIC) && + /* after receive flogi acc */ + (lport_state >= UNF_LPORT_ST_PLOGI_WAIT) && + (lport_state <= UNF_LPORT_ST_READY)) { + vport->link_up = lport->link_up; + (void)unf_lport_login(vport, lport_topo); + } + + return vport; +} + +unsigned int unf_drop_vport(struct unf_lport_s *v_vport) +{ + unsigned int ret = RETURN_ERROR; + struct fc_vport *vport = NULL; + + UNF_CHECK_VALID(0x1985, UNF_TRUE, v_vport, return RETURN_ERROR); + + vport = v_vport->vport; + if (!vport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]VPort(0x%x) find vport in scsi is NULL", + v_vport->port_id); + + return ret; + } + + ret = fc_vport_terminate(vport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]VPort(0x%x) terminate vport(%p) in scsi failed", + v_vport->port_id, vport); + + return ret; + } + return ret; +} + +unsigned int unf_delete_vport(unsigned int v_port_id, unsigned int v_vp_index) +{ + struct unf_lport_s *lport = NULL; + unsigned short vp_index = 0; + struct unf_lport_s *vport = NULL; + + lport = unf_find_lport_by_port_id(v_port_id); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) can not be found by portid", + v_port_id); + + return RETURN_ERROR; + } + + if (atomic_read(&lport->port_no_operater_flag) == UNF_LPORT_NOP) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) is in NOP, destroy all vports function will be called", + lport->port_id); + + return RETURN_OK; + } + + UNF_TOU16_CHECK(vp_index, v_vp_index, return RETURN_ERROR); + vport = unf_cm_lookup_vport_by_vp_index(lport, vp_index); + if (!vport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Can not lookup VPort by VPort index(0x%x)", + vp_index); + + return RETURN_ERROR; + } + + return unf_drop_vport(vport); +} + +void unf_vport_abort_all_sfs_exch(struct unf_lport_s *vport) +{ + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct list_head *xchg_node = NULL; + struct list_head *next_xchg_node = NULL; + struct unf_xchg_s *exch = NULL; + unsigned long pool_lock_flags = 0; + unsigned long exch_lock_flags = 0; + unsigned int i; + + UNF_CHECK_VALID(0x1985, UNF_TRUE, vport, return); + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + hot_pool = unf_get_hot_pool_by_lport( + (struct unf_lport_s *)(vport->root_lport), i); + if (unlikely(!hot_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) hot pool is NULL", + ((struct unf_lport_s *) + (vport->root_lport))->port_id); + continue; + } + 
+ spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + list_for_each_safe(xchg_node, next_xchg_node, + &hot_pool->sfs_busylist) { + exch = list_entry(xchg_node, struct unf_xchg_s, + list_xchg_entry); + spin_lock_irqsave(&exch->xchg_state_lock, + exch_lock_flags); + if (vport == exch->lport && + (atomic_read(&exch->ref_cnt) > 0)) { + exch->io_state |= TGT_IO_STATE_ABORT; + spin_unlock_irqrestore(&exch->xchg_state_lock, + exch_lock_flags); + unf_disc_ctrl_size_inc(vport, exch->cmnd_code); + /* Transfer exch to destroy chain */ + list_del(xchg_node); + list_add_tail(xchg_node, + &hot_pool->list_destroy_xchg); + + } else { + spin_unlock_irqrestore(&exch->xchg_state_lock, + exch_lock_flags); + } + } + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + } +} + +void unf_vport_abort_ini_io_exch(struct unf_lport_s *vport) +{ + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct list_head *xchg_node = NULL; + struct list_head *next_xchg_node = NULL; + struct unf_xchg_s *exch = NULL; + unsigned long pool_lock_flags = 0; + unsigned long exch_lock_flags = 0; + unsigned int i; + + UNF_CHECK_VALID(0x1986, UNF_TRUE, vport, return); + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + hot_pool = unf_get_hot_pool_by_lport( + (struct unf_lport_s *)(vport->root_lport), i); + if (unlikely(!hot_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) MgrIdex %d hot pool is NULL", + ((struct unf_lport_s *) + (vport->root_lport))->port_id, i); + continue; + } + + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + list_for_each_safe(xchg_node, next_xchg_node, + &hot_pool->ini_busylist) { + exch = list_entry(xchg_node, struct unf_xchg_s, + list_xchg_entry); + + if (vport == exch->lport && + atomic_read(&exch->ref_cnt) > 0) { + /* Transfer exch to destroy chain */ + list_del(xchg_node); + list_add_tail(xchg_node, + &hot_pool->list_destroy_xchg); + + spin_lock_irqsave(&exch->xchg_state_lock, + exch_lock_flags); + exch->io_state |= INI_IO_STATE_DRABORT; + spin_unlock_irqrestore(&exch->xchg_state_lock, + exch_lock_flags); + } + } + + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + } +} + +void unf_vport_abort_all_exch(struct unf_lport_s *vport) +{ + UNF_CHECK_VALID(0x1988, UNF_TRUE, vport, return); + + unf_vport_abort_all_sfs_exch(vport); + + unf_vport_abort_ini_io_exch(vport); +} + +unsigned int unf_vport_wait_all_exch_removed(struct unf_lport_s *vport) +{ + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct list_head *xchg_node = NULL; + struct list_head *next_xchg_node = NULL; + struct unf_xchg_s *exch = NULL; + unsigned int vport_uses = 0; + unsigned long flags = 0; + unsigned long long cur_jif = jiffies; + unsigned int i = 0; + + UNF_CHECK_VALID(0x1989, UNF_TRUE, vport, return RETURN_ERROR); + + while (1) { + vport_uses = 0; + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + hot_pool = unf_get_hot_pool_by_lport( + (struct unf_lport_s *) + (vport->root_lport), i); + if (unlikely(!hot_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, + UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) hot Pool is NULL", + ((struct unf_lport_s *) + (vport->root_lport))->port_id); + + continue; + } + + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags); + list_for_each_safe(xchg_node, next_xchg_node, + &hot_pool->list_destroy_xchg) { + exch = list_entry(xchg_node, struct unf_xchg_s, + list_xchg_entry); + + if (vport != exch->lport) + continue; + + vport_uses++; + + if (jiffies - cur_jif >= + 
msecs_to_jiffies(UNF_DELETE_VPORT_MAX_WAIT_TIME_MS)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_NORMAL, UNF_ERR, + "[error]VPort(0x%x) Abort Exch(0x%p) Type(0x%x) OxRxid(0x%x 0x%x), sid did(0x%x 0x%x) SeqId(0x%x) IOState(0x%x) Ref(0x%x)", + vport->port_id, exch, + (unsigned int)exch->xchg_type, + (unsigned int)exch->ox_id, + (unsigned int)exch->rx_id, + (unsigned int)exch->sid, + (unsigned int)exch->did, + (unsigned int)exch->seq_id, + (unsigned int)exch->io_state, + atomic_read(&exch->ref_cnt)); + } + } + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, + flags); + } + + if (vport_uses == 0) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]VPort(0x%x) has removed all exchanges it used", + vport->port_id); + break; + } + + if (jiffies - cur_jif >= msecs_to_jiffies(UNF_DELETE_VPORT_MAX_WAIT_TIME_MS)) + return RETURN_ERROR; + + msleep(1000); + } + + return RETURN_OK; +} + +unsigned int unf_vport_wait_rports_removed(struct unf_lport_s *vport) +{ + struct unf_disc_s *disc = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned int vport_uses = 0; + unsigned long flags = 0; + unsigned long long cur_jif = jiffies; + struct unf_rport_s *rport = NULL; + + UNF_CHECK_VALID(0x1990, UNF_TRUE, vport, return RETURN_ERROR); + disc = &vport->disc; + + while (1) { + vport_uses = 0; + spin_lock_irqsave(&disc->rport_busy_pool_lock, flags); + list_for_each_safe(node, next_node, &disc->list_delete_rports) { + rport = list_entry(node, struct unf_rport_s, + entry_rport); + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, + UNF_MAJOR, + "[info]Vport(0x%x) Rport(0x%x) point(%p) is in Delete", + vport->port_id, rport->nport_id, rport); + vport_uses++; + } + list_for_each_safe(node, next_node, + &disc->list_destroy_rports) { + rport = list_entry(node, struct unf_rport_s, + entry_rport); + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, + UNF_MAJOR, + "[info]Vport(0x%x) Rport(0x%x) point(%p) is in Destroy", + vport->port_id, rport->nport_id, rport); + vport_uses++; + } + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flags); + + if (vport_uses == 0) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]VPort(0x%x) has removed all RPorts it used", + vport->port_id); + break; + } + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Vport(0x%x) has %d RPorts not removed wait timeout(30s)", + vport->port_id, vport_uses); + + if (jiffies - cur_jif >= + msecs_to_jiffies(UNF_DELETE_VPORT_MAX_WAIT_TIME_MS)) + return RETURN_ERROR; + + msleep(5000); + } + + UNF_REFERNCE_VAR(rport); + + return RETURN_OK; +} + +unsigned int unf_destroy_one_vport(struct unf_lport_s *vport) +{ + unsigned int ret = RETURN_ERROR; + struct unf_lport_s *root_port = NULL; + + UNF_CHECK_VALID(0x1992, UNF_TRUE, vport, return RETURN_ERROR); + + root_port = (struct unf_lport_s *)vport->root_lport; + + unf_vport_fabric_logo(vport); + + /* 1 set NOP */ + atomic_set(&vport->port_no_operater_flag, UNF_LPORT_NOP); + vport->b_port_removing = UNF_TRUE; + + /* 2 report linkdown to scsi and delele rpot */ + unf_link_down_one_vport(vport); + + /* 3 set abort for exchange */ + unf_vport_abort_all_exch(vport); + + /* 4 wait exch return freepool */ + if (!root_port->b_port_dir_exchange) { + ret = unf_vport_wait_all_exch_removed(vport); + if (ret != RETURN_OK) { + if ((root_port->b_port_removing) != UNF_TRUE) { + vport->b_port_removing = UNF_FALSE; + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, + UNF_ERR, + "[err]VPort(0x%x) can not wait Exchange return freepool", + 
vport->port_id); + + return RETURN_ERROR; + } + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_NORMAL, UNF_WARN, + "[warn]Port(0x%x) is removing, there is dirty exchange, continue", + root_port->port_id); + + root_port->b_port_dir_exchange = UNF_TRUE; + } + } + + /* wait rport return rportpool */ + ret = unf_vport_wait_rports_removed(vport); + if (ret != RETURN_OK) { + vport->b_port_removing = UNF_FALSE; + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[err]VPort(0x%x) can not wait Rport return freepool", + vport->port_id); + + return RETURN_ERROR; + } + + unf_cm_vport_remove(vport); + + return RETURN_OK; +} + +void unf_link_down_one_vport(struct unf_lport_s *v_vport) +{ + unsigned long flag = 0; + struct unf_lport_s *root_lport = NULL; + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_KEVENT, + "[info]VPort(0x%x) linkdown", v_vport->port_id); + + spin_lock_irqsave(&v_vport->lport_state_lock, flag); + v_vport->link_up = UNF_PORT_LINK_DOWN; + v_vport->nport_id = 0; /* set nportid 0 before send fdisc again */ + unf_lport_stat_ma(v_vport, UNF_EVENT_LPORT_LINK_DOWN); + spin_unlock_irqrestore(&v_vport->lport_state_lock, flag); + + root_lport = (struct unf_lport_s *)v_vport->root_lport; + + unf_flush_disc_event(&root_lport->disc, v_vport); + + unf_clean_linkdown_rport(v_vport); +} + +void unf_linkdown_all_vports(void *v_lport) +{ + struct unf_lport_s *lport = NULL; + struct unf_vport_pool_s *vport_pool = NULL; + struct unf_lport_s *vport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x1993, UNF_TRUE, v_lport, return); + + lport = (struct unf_lport_s *)v_lport; + vport_pool = lport->vport_pool; + if (unlikely(!vport_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) VPort pool is NULL", + lport->port_id); + + return; + } + + /* Transfer to the transition chain */ + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + list_for_each_safe(node, next_node, &lport->list_vports_head) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + list_del_init(&vport->entry_vport); + list_add_tail(&vport->entry_vport, + &lport->list_intergrad_vports); + (void)unf_lport_refinc(vport); + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + while (!list_empty(&lport->list_intergrad_vports)) { + node = (&lport->list_intergrad_vports)->next; + vport = list_entry(node, struct unf_lport_s, entry_vport); + + list_del_init(&vport->entry_vport); + list_add_tail(&vport->entry_vport, &lport->list_vports_head); + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); + + unf_link_down_one_vport(vport); + + unf_vport_ref_dec(vport); + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); +} + +int unf_process_vports_linkup(void *v_arg_in, void *v_arg_out) +{ + struct unf_vport_pool_s *vport_pool = NULL; + struct unf_lport_s *lport = NULL; + struct unf_lport_s *vport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flags = 0; + int ret = RETURN_OK; + + UNF_REFERNCE_VAR(v_arg_out); + UNF_CHECK_VALID(0x1994, UNF_TRUE, v_arg_in, return RETURN_ERROR); + + lport = (struct unf_lport_s *)v_arg_in; + + if (atomic_read(&lport->port_no_operater_flag) == UNF_LPORT_NOP) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) is NOP don't continue", + lport->port_id); + + 
return RETURN_OK; + } + + if (lport->link_up != UNF_PORT_LINK_UP) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) is not linkup don't continue.", + lport->port_id); + + return RETURN_OK; + } + + vport_pool = lport->vport_pool; + if (unlikely(!vport_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) VPort pool is NULL.", + lport->port_id); + + return RETURN_OK; + } + + /* Transfer to the transition chain */ + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + list_for_each_safe(node, next_node, &lport->list_vports_head) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + list_del_init(&vport->entry_vport); + list_add_tail(&vport->entry_vport, + &lport->list_intergrad_vports); + (void)unf_lport_refinc(vport); + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + while (!list_empty(&lport->list_intergrad_vports)) { + node = (&lport->list_intergrad_vports)->next; + vport = list_entry(node, struct unf_lport_s, entry_vport); + + list_del_init(&vport->entry_vport); + list_add_tail(&vport->entry_vport, &lport->list_vports_head); + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); + + if (atomic_read(&vport->port_no_operater_flag) == + UNF_LPORT_NOP) { + unf_vport_ref_dec(vport); + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + continue; + } + + if ((lport->link_up == UNF_PORT_LINK_UP) && + (lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC)) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]Vport(0x%x) begin login", + vport->port_id); + + vport->link_up = UNF_PORT_LINK_UP; + (void)unf_lport_login(vport, lport->en_act_topo); + + msleep(100); + } else { + unf_link_down_one_vport(vport); + + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Vport(0x%x) login failed because root port linkdown", + vport->port_id); + } + + unf_vport_ref_dec(vport); + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); + + return ret; +} + +void unf_linkup_all_vports(struct unf_lport_s *v_lport) +{ + struct unf_cm_event_report *event = NULL; + + UNF_CHECK_VALID(0x1996, UNF_TRUE, v_lport, return); + + if (unlikely((!v_lport->event_mgr.pfn_unf_get_free_event) || + (!v_lport->event_mgr.pfn_unf_post_event) || + (!v_lport->event_mgr.pfn_unf_release_event))) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) Event fun is NULL", + v_lport->port_id); + return; + } + + event = v_lport->event_mgr.pfn_unf_get_free_event((void *)v_lport); + UNF_CHECK_VALID(0x1997, UNF_TRUE, event, return); + + event->lport = v_lport; + event->event_asy_flag = UNF_EVENT_ASYN; + event->pfn_unf_event_task = unf_process_vports_linkup; + event->para_in = (void *)v_lport; + + v_lport->event_mgr.pfn_unf_post_event(v_lport, event); +} + +void unf_destroy_all_vports(struct unf_lport_s *v_lport) +{ + struct unf_vport_pool_s *vport_pool = NULL; + struct unf_lport_s *lport = NULL; + struct unf_lport_s *vport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flags = 0; + + lport = v_lport; + UNF_CHECK_VALID(0x1998, UNF_TRUE, lport, return); + + vport_pool = lport->vport_pool; + if (unlikely(!vport_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Lport(0x%x) VPort pool is NULL", + lport->port_id); + + return; + } + + /* Transfer to the transition chain */ + 
spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + list_for_each_safe(node, next_node, &lport->list_vports_head) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + list_del_init(&vport->entry_vport); + list_add_tail(&vport->entry_vport, &lport->list_destroy_vports); + } + + list_for_each_safe(node, next_node, &lport->list_intergrad_vports) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + list_del_init(&vport->entry_vport); + list_add_tail(&vport->entry_vport, + &lport->list_destroy_vports); + atomic_dec(&vport->lport_ref_cnt); + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + while (!list_empty(&lport->list_destroy_vports)) { + node = (&lport->list_destroy_vports)->next; + vport = list_entry(node, struct unf_lport_s, entry_vport); + + list_del_init(&vport->entry_vport); + list_add_tail(&vport->entry_vport, &lport->list_vports_head); + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]VPort(0x%x) Destroy begin", + vport->port_id); + unf_drop_vport(vport); + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "[info]VPort(0x%x) Destroy end", + vport->port_id); + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); +} + diff --git a/drivers/scsi/huawei/hifc/unf_npiv.h b/drivers/scsi/huawei/hifc/unf_npiv.h new file mode 100644 index 000000000000..de9572931b78 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_npiv.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __NPIV_H__ +#define __NPIV_H__ + +/* product VPORT configure */ +struct vport_config_s { + unsigned long long node_name; + unsigned long long port_name; + unsigned int port_mode; /* INI, TGT or both */ +}; + +/* product Vport function */ +#define PORTID_VPINDEX_MASK 0xff000000 +#define PORTID_VPINDEX_SHIT 24 +unsigned int unf_npiv_conf(unsigned int v_port_id, unsigned long long v_wwpn); +struct unf_lport_s *unf_create_vport(struct unf_lport_s *v_lport, + struct vport_config_s *v_vport_config); +unsigned int unf_delete_vport(unsigned int v_port_id, unsigned int v_vp_index); + +/* Vport pool creat and release function */ +unsigned int unf_init_vport_pool(struct unf_lport_s *v_lport); +void unf_free_vport_pool(struct unf_lport_s *v_lport); + +/* Lport resigster stLPortMgTemp function */ +void unf_vport_remove(void *v_vport); +void unf_vport_ref_dec(struct unf_lport_s *v_vport); + +/* linkdown all Vport after receive linkdown event */ +void unf_linkdown_all_vports(void *v_lport); +/* Lport receive Flogi Acc linkup all Vport */ +void unf_linkup_all_vports(struct unf_lport_s *v_lport); +/* Lport remove delete all Vport */ +void unf_destroy_all_vports(struct unf_lport_s *v_lport); +void unf_vport_fabric_logo(struct unf_lport_s *v_vport); +unsigned int unf_destroy_one_vport(struct unf_lport_s *v_vport); +struct unf_lport_s *unf_alloc_vport(struct unf_lport_s *v_lport, + unsigned long long v_wwpn); +unsigned int unf_drop_vport(struct unf_lport_s *v_vport); +void unf_link_down_one_vport(struct unf_lport_s *v_vport); +void *unf_lookup_vport_by_vp_index(void *v_lport, unsigned short v_vp_index); +void *unf_lookup_vport_by_port_id(void *v_lport, unsigned int v_port_id); +void *unf_lookup_vport_by_did(void *v_lport, unsigned int v_did); +void 
*unf_lookup_vport_by_wwpn(void *v_lport, unsigned long long v_wwpn); + +#endif + diff --git a/drivers/scsi/huawei/hifc/unf_rport.c b/drivers/scsi/huawei/hifc/unf_rport.c new file mode 100644 index 000000000000..3b216763dd81 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_rport.c @@ -0,0 +1,2430 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#include "unf_log.h" +#include "unf_common.h" +#include "unf_lport.h" +#include "unf_rport.h" +#include "unf_exchg.h" +#include "unf_service.h" +#include <scsi/scsi_transport_fc.h> +#include "unf_portman.h" + +/* rport state: */ +/* ready --->>> link_down --->>> cloing --->>> timeout --->>> delete */ + +struct unf_rport_feature_pool_s *port_fea_pool; + +/* + * Function Name : unf_sesion_loss_timeout + * Function Description: session loss timeout + * Input Parameters : struct work_struct *v_work + * Output Parameters : N/A + * Return Type : unsigned int + */ +void unf_sesion_loss_timeout(struct work_struct *v_work) +{ + struct unf_wwpn_rport_info_s *wwpn_rport_info = NULL; + + UNF_CHECK_VALID(0x3040, UNF_TRUE, v_work, return); + + wwpn_rport_info = container_of(v_work, struct unf_wwpn_rport_info_s, + loss_tmo_work.work); + if (unlikely(!wwpn_rport_info)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]wwpn_rport_info is NULL"); + return; + } + + atomic_set(&wwpn_rport_info->en_scsi_state, UNF_SCSI_ST_DEAD); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[info]Port(0x%x) wwpn(0x%llx) set target(0x%x) scsi state to dead", + ((struct unf_lport_s *)(wwpn_rport_info->lport))->port_id, + wwpn_rport_info->wwpn, + wwpn_rport_info->target_id); +} + +/* + * Function Name : unf_alloc_scsi_id + * Function Description: alloc r_port scsi id + * Input Parameters : struct unf_lport_s *v_lport + * : struct unf_rport_s *v_rport + * Output Parameters : N/A + * Return Type : unsigned int + */ +static unsigned int unf_alloc_scsi_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + struct unf_rport_scsi_id_image_s *rport_scsi_table = NULL; + struct unf_wwpn_rport_info_s *wwn_rport_info = NULL; + unsigned long flags = 0; + unsigned int index = 0; + unsigned int ret = UNF_RETURN_ERROR; + + rport_scsi_table = &v_lport->rport_scsi_table; + UNF_REFERNCE_VAR(ret); + + spin_lock_irqsave(&rport_scsi_table->scsi_image_table_lock, flags); + + /* 1. 
At first, existence check */ + for (index = 0; index < rport_scsi_table->max_scsi_id; index++) { + wwn_rport_info = + &rport_scsi_table->wwn_rport_info_table[index]; + if (v_rport->port_name == wwn_rport_info->wwpn) { + spin_unlock_irqrestore( + &rport_scsi_table->scsi_image_table_lock, + flags); + UNF_DELAYED_WORK_SYNC(ret, (v_lport->port_id), + (&wwn_rport_info->loss_tmo_work), + "loss tmo Timer work"); + + /* Plug case: reuse again */ + spin_lock_irqsave( + &rport_scsi_table->scsi_image_table_lock, + flags); + wwn_rport_info->rport = v_rport; + wwn_rport_info->last_en_scis_state = + atomic_read(&wwn_rport_info->en_scsi_state); + atomic_set(&wwn_rport_info->en_scsi_state, + UNF_SCSI_ST_ONLINE); + spin_unlock_irqrestore( + &rport_scsi_table->scsi_image_table_lock, + flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]port(0x%x) find the same scsi_id(0x%x) by wwpn(0x%llx) rport(%p) n_port_id(0x%x)", + v_lport->port_id, index, + wwn_rport_info->wwpn, + v_rport, v_rport->nport_id); + + atomic_inc(&v_lport->resume_scsi_id); + goto find; + } + } + + /* 2. Alloc new SCSI ID */ + for (index = 0; index < rport_scsi_table->max_scsi_id; index++) { + wwn_rport_info = + &rport_scsi_table->wwn_rport_info_table[index]; + if (wwn_rport_info->wwpn == INVALID_WWPN) { + spin_unlock_irqrestore( + &rport_scsi_table->scsi_image_table_lock, + flags); + UNF_DELAYED_WORK_SYNC(ret, (v_lport->port_id), + (&wwn_rport_info->loss_tmo_work), + "loss tmo Timer work"); + + /* Use the free space */ + spin_lock_irqsave( + &rport_scsi_table->scsi_image_table_lock, + flags); + wwn_rport_info->rport = v_rport; + wwn_rport_info->wwpn = v_rport->port_name; + wwn_rport_info->last_en_scis_state = + atomic_read(&wwn_rport_info->en_scsi_state); + atomic_set(&wwn_rport_info->en_scsi_state, + UNF_SCSI_ST_ONLINE); + spin_unlock_irqrestore( + &rport_scsi_table->scsi_image_table_lock, + flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]port(0x%x) allco new scsi_id(0x%x) by wwpn(0x%llx) rport(%p) n_port_id(0x%x)", + v_lport->port_id, index, + wwn_rport_info->wwpn, + v_rport, v_rport->nport_id); + + atomic_inc(&v_lport->alloc_scsi_id); + goto find; + } + } + + /* 3. 
Reuse space has been used */ + for (index = 0; index < rport_scsi_table->max_scsi_id; index++) { + wwn_rport_info = + &rport_scsi_table->wwn_rport_info_table[index]; + if (atomic_read(&wwn_rport_info->en_scsi_state) == + UNF_SCSI_ST_DEAD) { + spin_unlock_irqrestore( + &rport_scsi_table->scsi_image_table_lock, + flags); + UNF_DELAYED_WORK_SYNC(ret, (v_lport->port_id), + (&wwn_rport_info->loss_tmo_work), + "loss tmo Timer work"); + + spin_lock_irqsave( + &rport_scsi_table->scsi_image_table_lock, + flags); + if (wwn_rport_info->dfx_counter) { + memset(wwn_rport_info->dfx_counter, 0, + sizeof(struct unf_wwpn_dfx_counter_info_s)); + } + wwn_rport_info->rport = v_rport; + wwn_rport_info->wwpn = v_rport->port_name; + wwn_rport_info->last_en_scis_state = + atomic_read(&wwn_rport_info->en_scsi_state); + atomic_set(&wwn_rport_info->en_scsi_state, + UNF_SCSI_ST_ONLINE); + spin_unlock_irqrestore( + &rport_scsi_table->scsi_image_table_lock, + flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[info]port(0x%x) reuse a dead scsi_id(0x%x) by wwpn(0x%llx) rport(%p) n_port_id(0x%x)", + v_lport->port_id, index, + wwn_rport_info->wwpn, + v_rport, v_rport->nport_id); + + atomic_inc(&v_lport->reuse_scsi_id); + goto find; + } + } + + spin_unlock_irqrestore(&rport_scsi_table->scsi_image_table_lock, + flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]port(0x%x) there is not enough scsi_id with max_value(0x%x)", + v_lport->port_id, index); + + return INVALID_VALUE32; + +find: + if (!wwn_rport_info->dfx_counter) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Port(0x%x) allocate Rport(0x%x) DFX buffer", + v_lport->port_id, wwn_rport_info->rport->nport_id); + wwn_rport_info->dfx_counter = + vmalloc(sizeof(struct unf_wwpn_dfx_counter_info_s)); + if (!wwn_rport_info->dfx_counter) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[err]Port(0x%x) allocate DFX buffer fail", + v_lport->port_id); + + return INVALID_VALUE32; + } + + memset(wwn_rport_info->dfx_counter, 0, + sizeof(struct unf_wwpn_dfx_counter_info_s)); + } + + UNF_REFERNCE_VAR(ret); + return index; +} + +static unsigned int unf_get_scsi_id_by_wwpn(struct unf_lport_s *v_lport, + unsigned long long v_wwpn) +{ + struct unf_rport_scsi_id_image_s *rport_scsi_table = NULL; + struct unf_wwpn_rport_info_s *wwn_rport_info = NULL; + unsigned long flags = 0; + unsigned int index = 0; + + UNF_CHECK_VALID(0x3015, UNF_TRUE, + v_lport, return INVALID_VALUE32); + rport_scsi_table = &v_lport->rport_scsi_table; + + if (!v_wwpn) + return INVALID_VALUE32; + + spin_lock_irqsave(&rport_scsi_table->scsi_image_table_lock, flags); + + for (index = 0; index < rport_scsi_table->max_scsi_id; index++) { + wwn_rport_info = + &rport_scsi_table->wwn_rport_info_table[index]; + if (v_wwpn == wwn_rport_info->wwpn) { + spin_unlock_irqrestore( + &rport_scsi_table->scsi_image_table_lock, + flags); + return index; + } + } + + spin_unlock_irqrestore(&rport_scsi_table->scsi_image_table_lock, + flags); + + return INVALID_VALUE32; +} + +static void unf_set_device_state(struct unf_lport_s *v_lport, + unsigned int v_scsi_id, + int en_scsi_state) +{ + struct unf_rport_scsi_id_image_s *scsi_image_table = NULL; + struct unf_wwpn_rport_info_s *wwpn_rport_info = NULL; + + if (unlikely(v_scsi_id >= UNF_MAX_SCSI_ID)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) RPort scsi_id(0x%x) is max than 0x%x", + v_lport->port_id, v_scsi_id, UNF_MAX_SCSI_ID); + return; + } + + 
scsi_image_table = &v_lport->rport_scsi_table; + wwpn_rport_info = &scsi_image_table->wwn_rport_info_table[v_scsi_id]; + atomic_set(&wwpn_rport_info->en_scsi_state, en_scsi_state); +} + +static void unf_set_rport_state(struct unf_rport_s *v_rport, + enum unf_rport_login_state_e v_states) +{ + UNF_CHECK_VALID(0x3055, UNF_TRUE, v_rport, return); + + if (v_states != v_rport->rp_state) { + /* Reset R_Port retry count */ + v_rport->retries = 0; + } + + v_rport->rp_state = v_states; +} + +void unf_rport_linkdown(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* + * 1. port_logout + * 2. rcvd_rscn_port_not_in_disc + * 3. each_rport_after_rscn + * 4. rcvd_gpnid_rjt + * 5. rport_after_logout(rport is fabric port) + */ + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3000, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3001, UNF_TRUE, v_rport, return); + UNF_REFERNCE_VAR(v_lport); + + /* 1. Update R_Port state: Link Down Event --->>> closing state */ + spin_lock_irqsave(&v_rport->rport_state_lock, flag); + unf_rport_state_ma(v_rport, UNF_EVENT_RPORT_LINK_DOWN); + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + + /* 3. Port enter closing (then enter to Delete) process */ + unf_rport_enter_closing(v_rport); +} + +static struct unf_rport_s *unf_rport_is_changed(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int v_sid) +{ + if (v_rport) { + /* S_ID or D_ID has been changed */ + if ((v_rport->nport_id != v_sid) || + (v_rport->local_nport_id != v_lport->nport_id)) { + /* + * 1. Swap case: (SID or DID changed): + * Report link down & delete immediately + */ + unf_rport_immediate_linkdown(v_lport, v_rport); + return NULL; + } + } + + return v_rport; +} + +struct unf_rport_s *unf_rport_set_qualifier_key_reuse( + struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport_by_nport_id, + struct unf_rport_s *v_rport_by_wwpn, + unsigned long long v_wwpn, + unsigned int v_sid) +{ + /* Used for HIFC Chip */ + struct unf_rport_s *rport = NULL; + struct unf_rport_s *rporta = NULL; + struct unf_rport_s *rportb = NULL; + int bwwpn_flag = 0; + + UNF_CHECK_VALID(0x3002, UNF_TRUE, v_lport, return NULL); + + /* About R_Port by N_Port_ID */ + rporta = unf_rport_is_changed(v_lport, v_rport_by_nport_id, v_sid); + /* About R_Port by WWpn */ + rportb = unf_rport_is_changed(v_lport, v_rport_by_wwpn, v_sid); + + if (!rporta && !rportb) { + return NULL; + } else if (!rporta && rportb) { + /* 3. Plug case: reuse again */ + rport = rportb; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) RPort(0x%p) WWPN(0x%llx) S_ID(0x%x) D_ID(0x%x) reused by wwpn", + v_lport->port_id, rport, rport->port_name, + rport->nport_id, rport->local_nport_id); + + return rport; /* Get by WWPN */ + } else if (rporta && !rportb) { + bwwpn_flag = ((rporta->port_name != v_wwpn) && + (rporta->port_name != 0) && + (rporta->port_name != INVALID_VALUE64)); + if (bwwpn_flag) { + /* 4. WWPN changed: Report link down + * & delete immediately + */ + unf_rport_immediate_linkdown(v_lport, rporta); + return NULL; + } + + /* Updtae WWPN */ + rporta->port_name = v_wwpn; + rport = rporta; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) RPort(0x%p) WWPN(0x%llx) S_ID(0x%x) D_ID(0x%x) reused by N_Port_ID", + v_lport->port_id, + rport, rport->port_name, + rport->nport_id, rport->local_nport_id); + + return rport; /* Get by N_Port_ID */ + } + + /* 5. 
Case for A == B && A && B */ + if (rporta == rportb) { + rport = rporta; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) find the same RPort(0x%p) WWPN(0x%llx) S_ID(0x%x) D_ID(0x%x)", + v_lport->port_id, + rport, rport->port_name, + rport->nport_id, rport->local_nport_id); + + return rport; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]port(0x%x) find two duplicate login. rport(A:0x%p, WWPN:0x%llx, S_ID:0x%x, D_ID:0x%x) rport(B:0x%p, WWPN:0x%llx, S_ID:0x%x, D_ID:0x%x)", + v_lport->port_id, + rporta, rporta->port_name, + rporta->nport_id, rporta->local_nport_id, + rportb, rportb->port_name, + rportb->nport_id, rportb->local_nport_id); + + /* 6. Case for A != B && A && B */ + unf_rport_immediate_linkdown(v_lport, rporta); + unf_rport_immediate_linkdown(v_lport, rportb); + + return NULL; +} + +struct unf_rport_s *unf_get_rport_by_wwn(struct unf_lport_s *v_lport, + unsigned long long v_wwpn) +{ + struct unf_lport_s *lport = NULL; + struct unf_disc_s *disc = NULL; + struct unf_rport_s *rport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flag = 0; + struct unf_rport_s *find_rport = NULL; + + UNF_CHECK_VALID(0x3049, UNF_TRUE, v_lport, return NULL); + lport = (struct unf_lport_s *)v_lport; + disc = &lport->disc; + + /* for each r_port from busy_list: compare wwpn(port name) */ + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + list_for_each_safe(node, next_node, &disc->list_busy_rports) { + rport = list_entry(node, struct unf_rport_s, entry_rport); + if (rport && rport->port_name == v_wwpn) { + find_rport = rport; + + break; + } + } + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + return find_rport; +} + +struct unf_rport_s *unf_find_valid_rport(struct unf_lport_s *v_lport, + unsigned long long v_wwpn, + unsigned int v_sid) +{ + struct unf_rport_s *rport = NULL; + struct unf_rport_s *rport_by_nport_id = NULL; + struct unf_rport_s *rport_by_wwpn = NULL; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x3005, UNF_TRUE, v_lport, return NULL); + UNF_CHECK_VALID(0x3006, UNF_TRUE, + v_lport->pfn_unf_qualify_rport, return NULL); + + /* Get R_Port by WWN & N_Port_ID */ + rport_by_nport_id = unf_get_rport_by_nport_id(v_lport, v_sid); + rport_by_wwpn = unf_get_rport_by_wwn(v_lport, v_wwpn); + + /* R_Port check: by WWPN */ + if (rport_by_wwpn) { + spin_lock_irqsave(&rport_by_wwpn->rport_state_lock, flags); + if (rport_by_wwpn->nport_id == UNF_FC_FID_FLOGI) { + spin_unlock_irqrestore( + &rport_by_wwpn->rport_state_lock, + flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_LOGIN_ATT, UNF_INFO, + "[err]Port(0x%x) RPort(0x%p) find by WWPN(0x%llx) is invalid", + v_lport->port_id, rport_by_wwpn, v_wwpn); + + rport_by_wwpn = NULL; + } else { + spin_unlock_irqrestore( + &rport_by_wwpn->rport_state_lock, + flags); + } + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x_0x%x) RPort(0x%p) find by N_Port_ID(0x%x) and RPort(0x%p) by WWPN(0x%llx)", + v_lport->port_id, v_lport->nport_id, + rport_by_nport_id, v_sid, rport_by_wwpn, v_wwpn); + + /* R_Port validity check: get by WWPN & N_Port_ID */ + rport = v_lport->pfn_unf_qualify_rport(v_lport, rport_by_nport_id, + rport_by_wwpn, + v_wwpn, v_sid); + return rport; +} + +void unf_rport_delay_login(struct unf_rport_s *v_rport) +{ + UNF_CHECK_VALID(0x3009, UNF_TRUE, v_rport, return); + + /* Do R_Port recovery: PLOGI or PRLI or LOGO */ + unf_rport_error_recovery(v_rport); +} + +unsigned int 
unf_rport_ref_inc(struct unf_rport_s *v_rport) +{ + UNF_CHECK_VALID(0x3010, UNF_TRUE, + v_rport, return UNF_RETURN_ERROR); + + if (atomic_read(&v_rport->rport_ref_cnt) <= 0) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Rport(0x%x) reference count is wrong %d", + v_rport->nport_id, + atomic_read(&v_rport->rport_ref_cnt)); + return UNF_RETURN_ERROR; + } + + atomic_inc(&v_rport->rport_ref_cnt); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Rport(0x%x) reference count is %d", + v_rport->nport_id, atomic_read(&v_rport->rport_ref_cnt)); + + return RETURN_OK; +} + +void unf_rport_enter_logo(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* + * 1. TMF/ABTS timeout recovery :Y + * 2. L_Port error recovery --->>> larger than retry_count :Y + * 3. R_Port error recovery --->>> larger than retry_count :Y + * 4. Check PLOGI parameters --->>> parameter is error :Y + * 5. PRLI handler --->>> R_Port state is error :Y + * 6. PDISC handler --->>> R_Port state is not PRLI_WAIT :Y + * 7. ADISC handler --->>> R_Port state is not PRLI_WAIT :Y + * 8. PLOGI wait timeout with R_PORT is INI mode :Y + * 9. RCVD GFFID_RJT --->>> R_Port state is INIT :Y + * 10. RCVD GPNID_ACC --->>> R_Port state is error :Y + * 11. Private Loop mode with LOGO case :Y + * 12. P2P mode with LOGO case :Y + * 13. Fabric mode with LOGO case :Y + * 14. RCVD PRLI_ACC with R_Port is INI :Y + * 15. TGT RCVD BLS_REQ with session is error :Y + */ + unsigned long flags = 0; + + UNF_CHECK_VALID(0x3013, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3014, UNF_TRUE, v_rport, return); + + spin_lock_irqsave(&v_rport->rport_state_lock, flags); + + if ((v_rport->rp_state == UNF_RPORT_ST_CLOSING) || + (v_rport->rp_state == UNF_RPORT_ST_DELETE)) { + /* 1. Already within Closing or Delete: Do nothing */ + spin_unlock_irqrestore(&v_rport->rport_state_lock, flags); + + return; + } else if (v_rport->rp_state == UNF_RPORT_ST_LOGO) { + /* 2. Update R_Port state: + * Normal Enter Event --->>> closing state + */ + unf_rport_state_ma(v_rport, UNF_EVENT_RPORT_NORMAL_ENTER); + spin_unlock_irqrestore(&v_rport->rport_state_lock, flags); + + /* Send Logo if necessary */ + if (unf_send_logo(v_lport, v_rport) != RETURN_OK) + unf_rport_enter_closing(v_rport); + } else { + /* + * 3. 
Update R_Port state: Link Down Event --->>> closing state + * enter closing state + */ + unf_rport_state_ma(v_rport, UNF_EVENT_RPORT_LINK_DOWN); + spin_unlock_irqrestore(&v_rport->rport_state_lock, flags); + + unf_rport_enter_closing(v_rport); + } +} + +unsigned int unf_free_scsi_id(struct unf_lport_s *v_lport, + unsigned int v_scsi_id) +{ + unsigned long flags = 0; + struct unf_rport_scsi_id_image_s *rport_scsi_table = NULL; + struct unf_wwpn_rport_info_s *wwn_rport_info = NULL; + + UNF_CHECK_VALID(0x3016, UNF_TRUE, + v_lport, return UNF_RETURN_ERROR); + + if (unlikely(v_lport->b_port_removing == UNF_TRUE)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) is removing and do nothing", + v_lport->port_id, v_lport->nport_id); + + return UNF_RETURN_ERROR; + } + + if (unlikely(v_scsi_id >= UNF_MAX_SCSI_ID)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x_0x%x) scsi_id(0x%x) is bigger than %d", + v_lport->port_id, v_lport->nport_id, + v_scsi_id, UNF_MAX_SCSI_ID); + + return UNF_RETURN_ERROR; + } + + rport_scsi_table = &v_lport->rport_scsi_table; + if (rport_scsi_table->wwn_rport_info_table) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[warn]Port(0x%x_0x%x) RPort(0x%p) free scsi_id(0x%x) wwpn(0x%llx) target_id(0x%x) succeed", + v_lport->port_id, v_lport->nport_id, + rport_scsi_table->wwn_rport_info_table[v_scsi_id].rport, + v_scsi_id, + rport_scsi_table->wwn_rport_info_table[v_scsi_id].wwpn, + rport_scsi_table->wwn_rport_info_table[v_scsi_id].target_id); + + spin_lock_irqsave(&rport_scsi_table->scsi_image_table_lock, + flags); + wwn_rport_info = + &rport_scsi_table->wwn_rport_info_table[v_scsi_id]; + if (wwn_rport_info->rport) { + wwn_rport_info->rport->rport = NULL; + wwn_rport_info->rport = NULL; + } + + wwn_rport_info->target_id = INVALID_VALUE32; + atomic_set(&wwn_rport_info->en_scsi_state, UNF_SCSI_ST_DEAD); + + /* NOTE: remain WWPN/Port_Name unchanged(un-cleared) */ + spin_unlock_irqrestore( + &rport_scsi_table->scsi_image_table_lock, + flags); + + return RETURN_OK; + } + + return UNF_RETURN_ERROR; +} + +static void unf_report_ini_linkup_event(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + UNF_CHECK_VALID(0x3031, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3032, UNF_TRUE, v_rport, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR, + "[event]Port(0x%x) RPort(0x%x_0x%p) put INI link up work(%p) to work_queue", + v_lport->port_id, v_rport->nport_id, v_rport, + &v_rport->start_work); + + if (unlikely(!queue_work(v_lport->link_event_wq, + &v_rport->start_work))) { + atomic_inc(&v_lport->add_start_work_failed); + + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[err]Port(0x%x) RPort(0x%x_0x%p) put INI link up to work_queue failed", + v_lport->port_id, v_rport->nport_id, v_rport); + } +} + +static void unf_report_ini_linkdown_event(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + unsigned int scsi_id = 0; + struct fc_rport *rport = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3033, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3034, UNF_TRUE, v_rport, return); + + /* + * 1. 
set local device(rport/rport_info_table) state + * -------------------------------------------------OFF_LINE + ** + * about rport->scsi_id + * valid during rport link up to link down + */ + spin_lock_irqsave(&v_rport->rport_state_lock, flag); + scsi_id = v_rport->scsi_id; + unf_set_device_state(v_lport, scsi_id, UNF_SCSI_ST_OFFLINE); + + /* 2. delete scsi's rport */ + rport = (struct fc_rport *)v_rport->rport; + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + if (rport) { + fc_remote_port_delete(rport); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[event]port(0x%x_0x%x) delete rport(0x%x) wwpn(0x%llx) scsi_id(0x%x) succeed", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, + v_rport->port_name, scsi_id); + + atomic_inc(&v_lport->scsi_session_del_success); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[info]Port(0x%x_0x%x) delete RPort(0x%x_0x%p) failed", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, v_rport); + + atomic_inc(&v_lport->scsi_session_del_failed); + } +} + +void unf_update_lport_state_by_linkup_event(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int rport_att) +{ + /* Report R_Port Link Up/Down Event */ + unsigned long flag = 0; + enum unf_port_state_e en_lport_state = 0; + + UNF_CHECK_VALID(0x3019, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3020, UNF_TRUE, v_rport, return); + + spin_lock_irqsave(&v_rport->rport_state_lock, flag); + + /* 1. R_Port does not has TGT mode any more */ + if (!(rport_att & UNF_FC4_FRAME_PARM_3_TGT) && + (v_rport->lport_ini_state == UNF_PORT_STATE_LINKUP)) { + v_rport->last_lport_ini_state = v_rport->lport_ini_state; + // L_Port INI mode: Down + v_rport->lport_ini_state = UNF_PORT_STATE_LINKDOWN; + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) RPort(0x%x) does not have TGT attribute(0x%x) any more", + v_lport->port_id, v_rport->nport_id, rport_att); + } + + /* 2. R_Port with TGT mode, L_Port with INI mode */ + if ((rport_att & UNF_FC4_FRAME_PARM_3_TGT) && + (v_lport->options & UNF_FC4_FRAME_PARM_3_INI)) { + v_rport->last_lport_ini_state = v_rport->lport_ini_state; + // L_Port INI mode: Up + v_rport->lport_ini_state = UNF_PORT_STATE_LINKUP; + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[warn]Port(0x%x) update INI state with last(0x%x) and now(0x%x)", + v_lport->port_id, v_rport->last_lport_ini_state, + v_rport->lport_ini_state); + } + + /* 3. 
Report L_Port INI/TGT Down/Up event to SCSI */ + if (v_rport->last_lport_ini_state == v_rport->lport_ini_state) { + if (v_rport->nport_id < UNF_FC_FID_DOM_MGR) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) RPort(0x%x %p) INI state(0x%x) has not been changed", + v_lport->port_id, v_rport->nport_id, v_rport, + v_rport->lport_ini_state); + } + + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + + return; + } + + en_lport_state = v_rport->lport_ini_state; + + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + + switch (en_lport_state) { + /* Link Down */ + case UNF_PORT_STATE_LINKDOWN: + unf_report_ini_linkdown_event(v_lport, v_rport); + break; + + /* Link Up */ + case UNF_PORT_STATE_LINKUP: + unf_report_ini_linkup_event(v_lport, v_rport); + break; + + default: + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) with unknown link status(0x%x)", + v_lport->port_id, v_rport->lport_ini_state); + break; + } +} + +static void unf_rport_call_back(void *v_rport, + void *v_lport, + unsigned int v_result) +{ + /* Report R_Port link down event */ + struct unf_rport_s *rport = NULL; + struct unf_lport_s *lport = NULL; + unsigned long flag = 0; + + UNF_REFERNCE_VAR(lport); + UNF_REFERNCE_VAR(v_result); + + UNF_CHECK_VALID(0x3037, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3038, UNF_TRUE, v_lport, return); + rport = (struct unf_rport_s *)v_rport; + lport = (struct unf_lport_s *)v_lport; + + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->last_lport_ini_state = rport->lport_ini_state; + rport->lport_ini_state = UNF_PORT_STATE_LINKDOWN; + rport->last_lport_tgt_state = rport->lport_tgt_state; + rport->lport_tgt_state = UNF_PORT_STATE_LINKDOWN; + + /* Report R_Port Link Down Event to scsi */ + if (rport->last_lport_ini_state == rport->lport_ini_state) { + if (rport->nport_id < UNF_FC_FID_DOM_MGR) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) RPort(0x%x %p) INI state(0x%x) has not been changed", + lport->port_id, rport->nport_id, rport, + rport->lport_ini_state); + } + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + return; + } + + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + unf_report_ini_linkdown_event(lport, rport); +} + +static void unf_rport_recovery_timeout(struct work_struct *v_work) +{ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + unsigned int ret = RETURN_OK; + unsigned long flag = 0; + enum unf_rport_login_state_e en_rp_state = UNF_RPORT_ST_INIT; + + UNF_CHECK_VALID(0x3039, UNF_TRUE, v_work, return); + + rport = container_of(v_work, struct unf_rport_s, recovery_work.work); + if (unlikely(!rport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, + UNF_ERR, "[err]RPort is NULL"); + + return; + } + + lport = rport->lport; + if (unlikely(!lport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]RPort(0x%x) Port is NULL", + rport->nport_id); + + /* for timer */ + unf_rport_ref_dec(rport); + return; + } + + spin_lock_irqsave(&rport->rport_state_lock, flag); + en_rp_state = rport->rp_state; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x_0x%x) RPort(0x%x) state(0x%x) recovery timer timeout", + lport->port_id, lport->nport_id, + rport->nport_id, en_rp_state); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + switch (en_rp_state) { + case UNF_RPORT_ST_PLOGI_WAIT: + if (((lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT) && + 
(lport->port_name > rport->port_name)) || + lport->en_act_topo != UNF_ACT_TOP_P2P_DIRECT) { + /* P2P: Name is master with P2P_D or has INI Mode */ + ret = unf_send_plogi(rport->lport, rport); + } + break; + + case UNF_RPORT_ST_PRLI_WAIT: + ret = unf_send_prli(rport->lport, rport); + break; + + default: + break; + } + + if (ret != RETURN_OK) + unf_rport_error_recovery(rport); + + /* company with timer */ + unf_rport_ref_dec(rport); +} + +static unsigned int unf_get_dev_loss_tmo_by_rport(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + struct fc_rport *rport = (struct fc_rport *)v_rport->rport; + + if (rport) + return rport->dev_loss_tmo; + else + return (unsigned int)unf_get_link_lose_tmo(v_lport); +} + +void unf_schedule_closing_work(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + unsigned long flags = 0; + struct unf_rport_scsi_id_image_s *rport_scsi_table = NULL; + struct unf_wwpn_rport_info_s *wwn_rport_info = NULL; + unsigned int scsi_id = 0; + unsigned int ret = 0; + unsigned int delay = 0; + + UNF_CHECK_VALID(0x3561, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3562, UNF_TRUE, v_rport, return); + + delay = unf_get_dev_loss_tmo_by_rport(v_lport, v_rport); + rport_scsi_table = &v_lport->rport_scsi_table; + scsi_id = v_rport->scsi_id; + spin_lock_irqsave(&v_rport->rport_state_lock, flags); + + /* 1. Cancel recovery_work */ + if (cancel_delayed_work(&v_rport->recovery_work)) { + atomic_dec(&v_rport->rport_ref_cnt); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x_0x%x) RPort(0x%x_0x%p) cancel recovery work succeed", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, v_rport); + } + + /* 2. Cancel Open_work */ + if (cancel_delayed_work(&v_rport->open_work)) { + atomic_dec(&v_rport->rport_ref_cnt); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x_0x%x) RPort(0x%x_0x%p) cancel open work succeed", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, v_rport); + } + + spin_unlock_irqrestore(&v_rport->rport_state_lock, flags); + + /* 3. 
Work in-queue (switch to thread context) */ + if (!queue_work(v_lport->link_event_wq, &v_rport->closing_work)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_ERR, + "[warn]Port(0x%x) RPort(0x%x_0x%p) add link down to work queue failed", + v_lport->port_id, v_rport->nport_id, v_rport); + + atomic_inc(&v_lport->add_closing_work_failed); + + } else { + spin_lock_irqsave(&v_rport->rport_state_lock, flags); + (void)unf_rport_ref_inc(v_rport); + spin_unlock_irqrestore(&v_rport->rport_state_lock, flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR, + "[info]Port(0x%x) RPort(0x%x_0x%p) add link down to work(%p) queue succeed", + v_lport->port_id, v_rport->nport_id, v_rport, + &v_rport->closing_work); + } + + if (v_rport->nport_id > UNF_FC_FID_DOM_MGR) + return; + + if (scsi_id >= UNF_MAX_SCSI_ID) { + scsi_id = unf_get_scsi_id_by_wwpn(v_lport, v_rport->port_name); + if (scsi_id >= UNF_MAX_SCSI_ID) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_NORMAL, UNF_WARN, + "[warn]Port(0x%x) RPort(0x%p) NPortId(0x%x) wwpn(0x%llx) option(0x%x) scsi_id(0x%x) is max than(0x%x)", + v_lport->port_id, v_rport, v_rport->nport_id, + v_rport->port_name, + v_rport->options, scsi_id, + UNF_MAX_SCSI_ID); + + return; + } + } + + wwn_rport_info = &rport_scsi_table->wwn_rport_info_table[scsi_id]; + ret = queue_delayed_work( + unf_work_queue, + &wwn_rport_info->loss_tmo_work, + (unsigned long)delay * msecs_to_jiffies(1000)); + if (!ret) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR, + "[info] Port(0x%x) add RPort(0x%p) NPortId(0x%x) scsi_id(0x%x) wwpn(0x%llx) loss timeout work failed", + v_lport->port_id, v_rport, + v_rport->nport_id, scsi_id, + v_rport->port_name); + } +} + +static void unf_rport_closing_timeout(struct work_struct *v_work) +{ + /* closing --->>>(timeout)--->>> delete */ + struct unf_rport_s *rport = NULL; + struct unf_lport_s *lport = NULL; + struct unf_disc_s *disc = NULL; + unsigned long rport_flag = 0; + unsigned long disc_flag = 0; + void (*pfn_unf_rport_call_back)(void *, void *, unsigned int) = NULL; + + UNF_CHECK_VALID(0x3040, UNF_TRUE, v_work, return); + + /* Get R_Port & L_Port & Disc */ + rport = container_of(v_work, struct unf_rport_s, closing_work); + if (unlikely(!rport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, + UNF_ERR, "[err]RPort is NULL"); + return; + } + + lport = rport->lport; + if (unlikely(!lport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]RPort(0x%x_0x%p) Port is NULL", + rport->nport_id, rport); + + /* Release directly (for timer) */ + unf_rport_ref_dec(rport); + return; + } + disc = &lport->disc; + + spin_lock_irqsave(&rport->rport_state_lock, rport_flag); + + /* 1. Update R_Port state: event_timeout --->>> state_delete */ + unf_rport_state_ma(rport, UNF_EVENT_RPORT_CLS_TIMEOUT); + + /* Check R_Port state */ + if (rport->rp_state != UNF_RPORT_ST_DELETE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x_0x%x) RPort(0x%x) closing timeout with error state(0x%x)", + lport->port_id, lport->nport_id, + rport->nport_id, rport->rp_state); + + spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); + + /* Dec ref_cnt for timer */ + unf_rport_ref_dec(rport); + return; + } + + pfn_unf_rport_call_back = rport->pfn_unf_rport_call_back; + spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); + + /* 2. 
Put R_Port to delete list */ + spin_lock_irqsave(&disc->rport_busy_pool_lock, disc_flag); + list_del_init(&rport->entry_rport); + list_add_tail(&rport->entry_rport, &disc->list_delete_rports); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag); + + /* 3. Report rport link down event to scsi */ + if (pfn_unf_rport_call_back) { /* unf_rport_call_back */ + pfn_unf_rport_call_back((void *)rport, (void *)rport->lport, + RETURN_OK); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]RPort(0x%x) callback is NULL", + rport->nport_id); + } + + /* 4. Remove/delete R_Port */ + unf_rport_ref_dec(rport); + unf_rport_ref_dec(rport); +} + +static void unf_rport_linkup_to_scsi(struct work_struct *v_work) +{ + struct fc_rport_identifiers rport_ids; + struct fc_rport *rport = NULL; + unsigned long flags = RETURN_OK; + struct unf_wwpn_rport_info_s *wwn_rport_info = NULL; + struct unf_rport_scsi_id_image_s *rport_scsi_table = NULL; + unsigned int scsi_id = 0; + + struct unf_lport_s *lport = NULL; + struct unf_rport_s *unf_rport = NULL; + + UNF_CHECK_VALID(0x3040, UNF_TRUE, v_work, return); + + unf_rport = container_of(v_work, struct unf_rport_s, start_work); + if (unlikely(!unf_rport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]RPort is NULL for work(%p)", v_work); + return; + } + + lport = unf_rport->lport; + if (unlikely(!lport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]RPort(0x%x_0x%p) Port is NULL", + unf_rport->nport_id, unf_rport); + return; + } + + /* 1. Alloc R_Port SCSI_ID (image table) */ + unf_rport->scsi_id = unf_alloc_scsi_id(lport, unf_rport); + if (unlikely(unf_rport->scsi_id == INVALID_VALUE32)) { + atomic_inc(&lport->scsi_session_add_failed); + + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[err]Port(0x%x_0x%x) RPort(0x%x_0x%p) wwpn(0x%llx) scsi_id(0x%x) is invalid", + lport->port_id, lport->nport_id, + unf_rport->nport_id, unf_rport, + unf_rport->port_name, unf_rport->scsi_id); + + /* NOTE: return */ + return; + } + + /* 2. Add rport to scsi */ + scsi_id = unf_rport->scsi_id; + rport_ids.node_name = unf_rport->node_name; + rport_ids.port_name = unf_rport->port_name; + rport_ids.port_id = unf_rport->nport_id; + rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; + rport = fc_remote_port_add(lport->host_info.p_scsi_host, + 0, &rport_ids); + if (unlikely(!rport)) { + atomic_inc(&lport->scsi_session_add_failed); + + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x_0x%x) RPort(0x%x_0x%p) wwpn(0x%llx) report link up to scsi failed", + lport->port_id, lport->nport_id, + unf_rport->nport_id, unf_rport, + unf_rport->port_name); + + unf_free_scsi_id(lport, scsi_id); + return; + } + + /* 3. Change rport role save local SCSI_ID to scsi rport */ + *((unsigned int *)rport->dd_data) = scsi_id; + rport->supported_classes = FC_COS_CLASS3; + rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET; + fc_remote_port_rolechg(rport, rport_ids.roles); + + /* 4. 
Save scsi rport info to local R_Port */ + spin_lock_irqsave(&unf_rport->rport_state_lock, flags); + unf_rport->rport = rport; + spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags); + + rport_scsi_table = &lport->rport_scsi_table; + spin_lock_irqsave(&rport_scsi_table->scsi_image_table_lock, flags); + wwn_rport_info = &rport_scsi_table->wwn_rport_info_table[scsi_id]; + wwn_rport_info->target_id = rport->scsi_target_id; + wwn_rport_info->rport = unf_rport; + atomic_set(&wwn_rport_info->en_scsi_state, UNF_SCSI_ST_ONLINE); + spin_unlock_irqrestore(&rport_scsi_table->scsi_image_table_lock, + flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[event]port(0x%x_0x%x) rport(0x%x) wwpn(0x%llx) scsi_id(0x%x) link up to scsi succeed", + lport->port_id, lport->nport_id, + unf_rport->nport_id, unf_rport->port_name, + scsi_id); + + atomic_inc(&lport->scsi_session_add_success); +} + +static void unf_rport_open_timeout(struct work_struct *v_work) +{ + struct unf_rport_s *rport = NULL; + struct unf_lport_s *lport = NULL; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x3041, UNF_TRUE, v_work, return); + + rport = container_of(v_work, struct unf_rport_s, open_work.work); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]RPort is NULL"); + + return; + } + + spin_lock_irqsave(&rport->rport_state_lock, flags); + lport = rport->lport; + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) RPort(0x%x) open work timeout with state(0x%x)", + lport->port_id, lport->nport_id, + rport->nport_id, rport->rp_state); + + /* NOTE: R_Port state check */ + if (rport->rp_state != UNF_RPORT_ST_PRLI_WAIT) { + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + /* Dec ref_cnt for timer case */ + unf_rport_ref_dec(rport); + return; + } + + /* Report R_Port Link Down event */ + unf_rport_state_ma(rport, UNF_EVENT_RPORT_LINK_DOWN); + + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + unf_rport_enter_closing(rport); + + /* Dec ref_cnt for timer case */ + unf_rport_ref_dec(rport); + + UNF_REFERNCE_VAR(lport); +} + +static unsigned int unf_alloc_index_for_rport(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + unsigned long rport_flag = 0; + unsigned long pool_flag = 0; + unsigned int alloc_indx = 0; + unsigned int max_rport = 0; + struct unf_rport_pool_s *rport_pool = NULL; + + rport_pool = &v_lport->rport_pool; + max_rport = v_lport->low_level_func.lport_cfg_items.max_login; + + spin_lock_irqsave(&rport_pool->rport_free_pool_lock, pool_flag); + while (alloc_indx < max_rport) { + if (!test_bit((int)alloc_indx, rport_pool->pul_rpi_bitmap)) { + /* Case for HIFC */ + if (unlikely(atomic_read( + &v_lport->port_no_operater_flag) == + UNF_LPORT_NOP)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) is within NOP", + v_lport->port_id); + + spin_unlock_irqrestore( + &rport_pool->rport_free_pool_lock, + pool_flag); + return UNF_RETURN_ERROR; + } + + spin_lock_irqsave(&v_rport->rport_state_lock, + rport_flag); + /* set R_Port index */ + v_rport->rport_index = alloc_indx; + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) RPort(0x%x) alloc index(0x%x) succeed", + v_lport->port_id, alloc_indx, + v_rport->nport_id); + + spin_unlock_irqrestore(&v_rport->rport_state_lock, + rport_flag); + + /* Set (index) bit */ + set_bit((int)alloc_indx, rport_pool->pul_rpi_bitmap); + + /* Break here */ + break; + } + alloc_indx++; + } + 
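+ /* + * Loop exit: either a free RPI bit was found and claimed (break with + * alloc_indx < max_rport) or every index is already in use + * (alloc_indx == max_rport), which is reported as an error below. + */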
spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, pool_flag); + + if (alloc_indx == max_rport) + return UNF_RETURN_ERROR; + else + return RETURN_OK; +} + +static void unf_check_rport_pool_status(struct unf_lport_s *v_lport) +{ + struct unf_lport_s *lport = v_lport; + struct unf_rport_pool_s *rport_pool = NULL; + unsigned long flags = 0; + unsigned int max_rport = 0; + + UNF_CHECK_VALID(0x3045, UNF_TRUE, v_lport, return); + rport_pool = &lport->rport_pool; + + spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flags); + max_rport = lport->low_level_func.lport_cfg_items.max_login; + if ((rport_pool->rport_pool_completion) && + (max_rport == rport_pool->rport_pool_count)) { + complete(rport_pool->rport_pool_completion); + } + spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flags); +} + +void unf_init_rport_params(struct unf_rport_s *v_rport, + struct unf_lport_s *v_lport) +{ + struct unf_rport_s *rport = v_rport; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3046, UNF_TRUE, rport, return); + UNF_CHECK_VALID(0x3046, UNF_TRUE, v_lport, return); + + spin_lock_irqsave(&rport->rport_state_lock, flag); + unf_set_rport_state(rport, UNF_RPORT_ST_INIT); + /* set callback function */ + rport->pfn_unf_rport_call_back = unf_rport_call_back; + rport->lport = v_lport; + rport->fcp_conf_needed = UNF_FALSE; + rport->tape_support_needed = UNF_FALSE; + rport->mas_retries = UNF_MAX_RETRY_COUNT; + rport->logo_retries = 0; + rport->retries = 0; + rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; + rport->last_lport_ini_state = UNF_PORT_STATE_LINKDOWN; + rport->lport_ini_state = UNF_PORT_STATE_LINKDOWN; + rport->last_lport_tgt_state = UNF_PORT_STATE_LINKDOWN; + rport->lport_tgt_state = UNF_PORT_STATE_LINKDOWN; + rport->node_name = 0; + rport->port_name = INVALID_WWPN; + rport->disc_done = 0; + rport->scsi_id = INVALID_VALUE32; + rport->data_thread = NULL; + sema_init(&rport->task_sema, 0); + atomic_set(&rport->rport_ref_cnt, 0); + atomic_set(&rport->pending_io_cnt, 0); + rport->rport_alloc_jifs = jiffies; + + rport->ed_tov = UNF_DEFAULT_EDTOV + 500; + rport->ra_tov = UNF_DEFAULT_RATOV; + + INIT_WORK(&rport->closing_work, unf_rport_closing_timeout); + INIT_WORK(&rport->start_work, unf_rport_linkup_to_scsi); + INIT_DELAYED_WORK(&rport->recovery_work, unf_rport_recovery_timeout); + INIT_DELAYED_WORK(&rport->open_work, unf_rport_open_timeout); + + atomic_inc(&rport->rport_ref_cnt); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); +} + +static unsigned int unf_alloc_llrport_resource(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int v_nport_id) +{ + unsigned int ret = RETURN_OK; + struct unf_rport_info_s rport_info = { 0 }; + + struct unf_lport_s *lport = NULL; + + lport = v_lport->root_lport; + + if (lport->low_level_func.service_op.pfn_unf_alloc_rport_res) { + rport_info.nport_id = v_nport_id; + rport_info.rport_index = v_rport->rport_index; + rport_info.local_nport_id = v_lport->nport_id; /* sid */ + rport_info.port_name = 0; + + ret = lport->low_level_func.service_op.pfn_unf_alloc_rport_res( + lport->fc_port, + &rport_info); + } else { + ret = RETURN_OK; + } + + return ret; +} + +static void *unf_add_rport_to_busy_list(struct unf_lport_s *v_lport, + struct unf_rport_s *v_new_rport, + unsigned int v_nport_id) +{ + struct unf_rport_pool_s *rport_pool = NULL; + struct unf_lport_s *lport = NULL; + struct unf_disc_s *disc = NULL; + struct unf_rport_s *new_rport = v_new_rport; + struct unf_rport_s *old_rport = NULL; + struct list_head *node = NULL; + struct 
list_head *next_node = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3046, UNF_TRUE, v_lport, return NULL); + UNF_CHECK_VALID(0x3046, UNF_TRUE, v_new_rport, return NULL); + + lport = v_lport->root_lport; + disc = &v_lport->disc; + UNF_CHECK_VALID(0x3046, UNF_TRUE, lport, return NULL); + rport_pool = &lport->rport_pool; + + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + list_for_each_safe(node, next_node, &disc->list_busy_rports) { + /* According to N_Port_ID */ + old_rport = list_entry(node, struct unf_rport_s, entry_rport); + if (old_rport->nport_id == v_nport_id) + break; /* find by N_Port_ID */ + old_rport = NULL; + } + + if (old_rport) { + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + /* Use old R_Port & Add new R_Port back to R_Port Pool */ + spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag); + clear_bit((int)new_rport->rport_index, + rport_pool->pul_rpi_bitmap); + list_add_tail(&new_rport->entry_rport, + &rport_pool->list_rports_pool); + rport_pool->rport_pool_count++; + spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, + flag); + + unf_check_rport_pool_status(lport); + return (void *)old_rport; + } + + if (unf_alloc_llrport_resource(v_lport, new_rport, + v_nport_id) != RETURN_OK) { + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + /* Add new R_Port back to R_Port Pool */ + spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag); + clear_bit((int)new_rport->rport_index, + rport_pool->pul_rpi_bitmap); + list_add_tail(&new_rport->entry_rport, + &rport_pool->list_rports_pool); + rport_pool->rport_pool_count++; + spin_unlock_irqrestore( + &rport_pool->rport_free_pool_lock, flag); + + unf_check_rport_pool_status(lport); + + return NULL; + } + + /* Add new R_Port to busy list */ + list_add_tail(&new_rport->entry_rport, + &disc->list_busy_rports); + new_rport->nport_id = v_nport_id; /* set R_Port N_Port_ID */ + /* set L_Port N_Port_ID */ + new_rport->local_nport_id = v_lport->nport_id; + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + unf_init_rport_params(new_rport, v_lport); + + return (void *)new_rport; +} + +void *unf_rport_get_free_and_init(void *v_lport, + unsigned int v_rport_type, + unsigned int v_nport_id) +{ + struct unf_lport_s *lport = NULL; + struct unf_rport_pool_s *rport_pool = NULL; + struct unf_disc_s *disc = NULL; + struct unf_disc_s *v_port_disc = NULL; + struct unf_rport_s *rport = NULL; + struct list_head *list_head = NULL; + unsigned long flag = 0; + struct unf_disc_rport_s *disc_rport = NULL; + + UNF_REFERNCE_VAR(v_rport_type); + UNF_REFERNCE_VAR(rport); + + UNF_CHECK_VALID(0x3046, UNF_TRUE, v_lport, return NULL); + lport = ((struct unf_lport_s *)v_lport)->root_lport; /* ROOT L_Port */ + UNF_CHECK_VALID(0x3047, UNF_TRUE, lport, return NULL); + + /* Check L_Port state: NOP */ + if (unlikely(atomic_read(&lport->port_no_operater_flag) == + UNF_LPORT_NOP)) { + return NULL; + } + + rport_pool = &lport->rport_pool; + disc = &lport->disc; + + /* 1. 
UNF_PORT_TYPE_DISC: Get from disc_rport_pool */ + if (v_rport_type == UNF_PORT_TYPE_DISC) { + v_port_disc = &(((struct unf_lport_s *)v_lport)->disc); + + /* NOTE: list_disc_rports_pool used + * with list_disc_rport_busy + */ + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + if (!list_empty(&disc->disc_rport_mgr.list_disc_rports_pool)) { + /* Get & delete from Disc R_Port Pool & + * Add it to Busy list + */ + list_head = + (&disc->disc_rport_mgr.list_disc_rports_pool)->next; + list_del_init(list_head); + disc_rport = list_entry(list_head, + struct unf_disc_rport_s, + entry_rport); + /* Set R_Port N_Port_ID */ + disc_rport->nport_id = v_nport_id; + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, + flag); + + /* Add to list_disc_rport_busy */ + spin_lock_irqsave(&v_port_disc->rport_busy_pool_lock, + flag); + list_add_tail( + list_head, + &v_port_disc->disc_rport_mgr.list_disc_rport_busy); + spin_unlock_irqrestore( + &v_port_disc->rport_busy_pool_lock, flag); + } else { + disc_rport = NULL; + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, + flag); + } + + /* NOTE: return */ + return disc_rport; + } + + /* 2. UNF_PORT_TYPE_FC (rport_pool): Get from list_rports_pool */ + spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag); + if (!list_empty(&rport_pool->list_rports_pool)) { + /* Get & delete from R_Port free Pool */ + list_head = (&rport_pool->list_rports_pool)->next; + list_del_init(list_head); + rport_pool->rport_pool_count--; + rport = list_entry(list_head, struct unf_rport_s, entry_rport); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) RPort pool is empty", + lport->port_id, lport->nport_id); + + spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, + flag); + + /* NOTE: return */ + return NULL; + } + spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flag); + + /* 3. Alloc (& set bit) R_Port index */ + if (unf_alloc_index_for_rport(lport, rport) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) allocate index for new RPort failed", + lport->nport_id); + + /* Alloc failed: Add R_Port back to R_Port Pool */ + spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag); + list_add_tail(&rport->entry_rport, + &rport_pool->list_rports_pool); + rport_pool->rport_pool_count++; + spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, + flag); + + unf_check_rport_pool_status(lport); + return NULL; + } + + /* 4. 
Add R_Port to busy list */ + rport = unf_add_rport_to_busy_list(v_lport, rport, v_nport_id); + UNF_REFERNCE_VAR(rport); + + return (void *)rport; +} + +static void unf_reset_rport_attribute(struct unf_rport_s *v_rport) +{ + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3070, 1, v_rport, return); + + spin_lock_irqsave(&v_rport->rport_state_lock, flag); + v_rport->pfn_unf_rport_call_back = NULL; + v_rport->lport = NULL; + v_rport->node_name = INVALID_VALUE64; + v_rport->port_name = INVALID_WWPN; + v_rport->nport_id = INVALID_VALUE32; + v_rport->local_nport_id = INVALID_VALUE32; + v_rport->max_frame_size = UNF_MAX_FRAME_SIZE; + v_rport->ed_tov = UNF_DEFAULT_EDTOV; + v_rport->ra_tov = UNF_DEFAULT_RATOV; + v_rport->rport_index = INVALID_VALUE32; + v_rport->scsi_id = INVALID_VALUE32; + v_rport->rport_alloc_jifs = INVALID_VALUE64; + + /* ini or tgt */ + v_rport->options = 0; + + /* fcp conf */ + v_rport->fcp_conf_needed = UNF_FALSE; + + /* special req retry times */ + v_rport->retries = 0; + v_rport->logo_retries = 0; + + /* special req retry times */ + v_rport->mas_retries = UNF_MAX_RETRY_COUNT; + + /* for target mode */ + v_rport->session = NULL; + v_rport->last_lport_ini_state = UNF_PORT_STATE_LINKDOWN; + v_rport->lport_ini_state = UNF_PORT_STATE_LINKDOWN; + v_rport->rp_state = UNF_RPORT_ST_INIT; + v_rport->last_lport_tgt_state = UNF_PORT_STATE_LINKDOWN; + v_rport->lport_tgt_state = UNF_PORT_STATE_LINKDOWN; + v_rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; + v_rport->disc_done = 0; + + /* for scsi */ + v_rport->data_thread = NULL; + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); +} + +static unsigned int unf_rport_remove(void *v_rport) +{ + /* remove_old_rport/... --->>> rport_ref_dec --->>> rport_remove */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_rport_pool_s *rport_pool = NULL; + unsigned long flag = 0; + unsigned int rport_index = 0; + + UNF_CHECK_VALID(0x3050, UNF_TRUE, + v_rport, return UNF_RETURN_ERROR); + + rport = (struct unf_rport_s *)v_rport; + lport = rport->lport; + UNF_CHECK_VALID(0x3051, UNF_TRUE, + lport, return UNF_RETURN_ERROR); + rport_pool = &((struct unf_lport_s *)lport->root_lport)->rport_pool; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Remove RPort(0x%p) with remote_nport_id(0x%x) local_nport_id(0x%x)", + rport, rport->nport_id, rport->local_nport_id); + + /* 1. Terminate open exchange before rport remove: set ABORT tag */ + unf_cm_xchg_mgr_abort_io_by_id(lport, rport, + rport->nport_id, lport->nport_id, 0); + + /* 2. Abort sfp exchange before rport remove */ + unf_cm_xchg_mgr_abort_sfs_by_id(lport, rport, + rport->nport_id, lport->nport_id); + + /* 3. 
Release R_Port resource: session reset/delete */ + (void)unf_release_rport_res(lport, rport); + + /* 4.1 Delete R_Port from disc destroy/delete list */ + spin_lock_irqsave(&lport->disc.rport_busy_pool_lock, flag); + list_del_init(&rport->entry_rport); + spin_unlock_irqrestore(&lport->disc.rport_busy_pool_lock, flag); + + rport_index = rport->rport_index; /* according to bitmap */ + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[event]Port(0x%x) release RPort(0x%x_%p) with index(0x%x)", + lport->port_id, rport->nport_id, rport, rport->rport_index); + + unf_reset_rport_attribute(rport); + + /* 4.2 Add rport to --->>> rport_pool (free pool) & clear bitmap */ + spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag); + if (lport->low_level_func.rport_release_type == + UNF_LOW_LEVEL_RELEASE_RPORT_SYNC) { + clear_bit((int)rport_index, rport_pool->pul_rpi_bitmap); + } + list_add_tail(&rport->entry_rport, &rport_pool->list_rports_pool); + rport_pool->rport_pool_count++; + spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flag); + + unf_check_rport_pool_status((struct unf_lport_s *)lport->root_lport); + up(&rport->task_sema); + + return RETURN_OK; +} + +void unf_rport_ref_dec(struct unf_rport_s *v_rport) +{ + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3011, UNF_TRUE, v_rport, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Rport(0x%x) reference count is %d", + v_rport->nport_id, atomic_read(&v_rport->rport_ref_cnt)); + + spin_lock_irqsave(&v_rport->rport_state_lock, flag); + if (atomic_dec_and_test(&v_rport->rport_ref_cnt)) { + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + (void)unf_rport_remove(v_rport); + } else { + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + } +} + +static enum unf_rport_login_state_e unf_rport_stat_init( + enum unf_rport_login_state_e v_old_state, + enum unf_rport_event_e v_event) +{ + enum unf_rport_login_state_e en_next_state = UNF_RPORT_ST_INIT; + + switch (v_event) { + case UNF_EVENT_RPORT_LOGO: /* LOGO --->>> LOGO */ + en_next_state = UNF_RPORT_ST_LOGO; + break; + + case UNF_EVENT_RPORT_ENTER_PLOGI: /* PLOGI --->>> PLOGI_WAIT */ + en_next_state = UNF_RPORT_ST_PLOGI_WAIT; + break; + + case UNF_EVENT_RPORT_LINK_DOWN: /* Link Down --->>> Closing */ + en_next_state = UNF_RPORT_ST_CLOSING; + break; + + default: + en_next_state = v_old_state; + break; + } + + return en_next_state; +} + +static enum unf_rport_login_state_e unf_rport_stat_plogi_wait( + enum unf_rport_login_state_e v_old_state, + enum unf_rport_event_e v_event) +{ + enum unf_rport_login_state_e en_next_state = UNF_RPORT_ST_INIT; + + switch (v_event) { + case UNF_EVENT_RPORT_ENTER_PRLI: /* PRLI --->>> PRLI_WAIT */ + en_next_state = UNF_RPORT_ST_PRLI_WAIT; + break; + + case UNF_EVENT_RPORT_LINK_DOWN: /* Link Down --->>> closing */ + en_next_state = UNF_RPORT_ST_CLOSING; + break; + + case UNF_EVENT_RPORT_LOGO: /* LOGO --->>> LOGO */ + en_next_state = UNF_RPORT_ST_LOGO; + break; + + case UNF_EVENT_RPORT_RECOVERY: /* Recovery --->>> Ready */ + en_next_state = UNF_RPORT_ST_READY; + break; + + default: + en_next_state = v_old_state; + break; + } + + return en_next_state; +} + +static enum unf_rport_login_state_e unf_rport_stat_prli_wait( + enum unf_rport_login_state_e v_old_state, + enum unf_rport_event_e v_event) +{ + enum unf_rport_login_state_e en_next_state = UNF_RPORT_ST_INIT; + + switch (v_event) { + case UNF_EVENT_RPORT_READY: /* Ready --->>> Ready */ + en_next_state = UNF_RPORT_ST_READY; + break; + + case 
UNF_EVENT_RPORT_LOGO: /* LOGO --->>> LOGO */ + en_next_state = UNF_RPORT_ST_LOGO; + break; + + case UNF_EVENT_RPORT_LINK_DOWN: /* Link Down --->>> Closing */ + en_next_state = UNF_RPORT_ST_CLOSING; + break; + + case UNF_EVENT_RPORT_RECOVERY: /* Recovery --->>> Ready */ + en_next_state = UNF_RPORT_ST_READY; + break; + + default: + en_next_state = v_old_state; + break; + } + + return en_next_state; +} + +static enum unf_rport_login_state_e unf_rport_stat_ready( + enum unf_rport_login_state_e v_old_state, + enum unf_rport_event_e v_event) +{ + enum unf_rport_login_state_e en_next_state = UNF_RPORT_ST_INIT; + + switch (v_event) { + case UNF_EVENT_RPORT_LOGO: /* LOGO --->>> LOGO */ + en_next_state = UNF_RPORT_ST_LOGO; + break; + + case UNF_EVENT_RPORT_LINK_DOWN: /* Link Down --->>> closing */ + en_next_state = UNF_RPORT_ST_CLOSING; + break; + + case UNF_EVENT_RPORT_ENTER_PLOGI: /* ready --->>> plogi_wait */ + en_next_state = UNF_RPORT_ST_PLOGI_WAIT; + break; + + default: + en_next_state = v_old_state; + break; + } + + return en_next_state; +} + +static enum unf_rport_login_state_e unf_rport_stat_closing( + enum unf_rport_login_state_e v_old_state, + enum unf_rport_event_e v_event) +{ + enum unf_rport_login_state_e en_next_state = UNF_RPORT_ST_INIT; + + switch (v_event) { + case UNF_EVENT_RPORT_CLS_TIMEOUT: /* timeout --->>> delete */ + en_next_state = UNF_RPORT_ST_DELETE; + break; + + case UNF_EVENT_RPORT_RELOGIN: /* relogin --->>> INIT */ + en_next_state = UNF_RPORT_ST_INIT; + break; + + case UNF_EVENT_RPORT_RECOVERY: /* recovery --->>> ready */ + en_next_state = UNF_RPORT_ST_READY; + break; + + default: + en_next_state = v_old_state; + break; + } + + return en_next_state; +} + +static enum unf_rport_login_state_e unf_rport_stat_logo( + enum unf_rport_login_state_e v_old_state, + enum unf_rport_event_e v_event) +{ + enum unf_rport_login_state_e en_next_state = UNF_RPORT_ST_INIT; + + switch (v_event) { + case UNF_EVENT_RPORT_NORMAL_ENTER: /* normal enter --->>> closing */ + en_next_state = UNF_RPORT_ST_CLOSING; + break; + + case UNF_EVENT_RPORT_RECOVERY: /* recovery --->>> ready */ + en_next_state = UNF_RPORT_ST_READY; + break; + + default: + en_next_state = v_old_state; + break; + } + + return en_next_state; +} + +void unf_rport_state_ma(struct unf_rport_s *v_rport, + enum unf_rport_event_e v_event) +{ + enum unf_rport_login_state_e en_old_state = UNF_RPORT_ST_INIT; + enum unf_rport_login_state_e en_next_state = UNF_RPORT_ST_INIT; + + UNF_CHECK_VALID(0x3056, UNF_TRUE, v_rport, return); + + en_old_state = v_rport->rp_state; + + switch (v_rport->rp_state) { + /* State INIT */ + case UNF_RPORT_ST_INIT: + en_next_state = unf_rport_stat_init(en_old_state, v_event); + break; + + /* State PLOGI Wait */ + case UNF_RPORT_ST_PLOGI_WAIT: + en_next_state = unf_rport_stat_plogi_wait(en_old_state, + v_event); + break; + + /* State PRLI Wait */ + case UNF_RPORT_ST_PRLI_WAIT: + en_next_state = unf_rport_stat_prli_wait(en_old_state, + v_event); + break; + + /* State LOGO */ + case UNF_RPORT_ST_LOGO: + en_next_state = unf_rport_stat_logo(en_old_state, v_event); + break; + + /* State CLOSING */ + case UNF_RPORT_ST_CLOSING: + en_next_state = unf_rport_stat_closing(en_old_state, v_event); + break; + + /* State READY */ + case UNF_RPORT_ST_READY: + en_next_state = unf_rport_stat_ready(en_old_state, v_event); + break; + + /* State DELETE */ + case UNF_RPORT_ST_DELETE: + default: + en_next_state = UNF_RPORT_ST_INIT; + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]RPort(0x%x) hold 
state(0x%x)", + v_rport->nport_id, v_rport->rp_state); + break; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MINOR, + "[info]RPort(0x%x) with oldstate(0x%x) event(0x%x) nextstate(0x%x)", + v_rport->nport_id, en_old_state, v_event, en_next_state); + + unf_set_rport_state(v_rport, en_next_state); +} + +void unf_clean_linkdown_rport(struct unf_lport_s *v_lport) +{ + /* for L_Port's R_Port(s) */ + struct unf_disc_s *disc = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + struct unf_rport_s *rport = NULL; + struct unf_lport_s *lport = NULL; + unsigned long disc_lock_flag = 0; + unsigned long rport_lock_flag = 0; + + UNF_CHECK_VALID(0x3058, UNF_TRUE, v_lport, return); + disc = &v_lport->disc; + + /* for each busy R_Port */ + spin_lock_irqsave(&disc->rport_busy_pool_lock, disc_lock_flag); + /* --->>> busy_rports */ + list_for_each_safe(node, next_node, &disc->list_busy_rports) { + rport = list_entry(node, struct unf_rport_s, entry_rport); + + /* 1. Prevent process Repeatly: Closing */ + spin_lock_irqsave(&rport->rport_state_lock, rport_lock_flag); + if (rport->rp_state == UNF_RPORT_ST_CLOSING) { + spin_unlock_irqrestore(&rport->rport_state_lock, + rport_lock_flag); + continue; + } + + /* 2. Increase ref_cnt to protect R_Port */ + if (unf_rport_ref_inc(rport) != RETURN_OK) { + spin_unlock_irqrestore(&rport->rport_state_lock, + rport_lock_flag); + continue; + } + + /* 3. Update R_Port state: + * Link Down Event --->>> closing state + */ + unf_rport_state_ma(rport, UNF_EVENT_RPORT_LINK_DOWN); + + /* 4. Put R_Port from busy to destroy list */ + list_del_init(&rport->entry_rport); + list_add_tail(&rport->entry_rport, &disc->list_destroy_rports); + + lport = rport->lport; + spin_unlock_irqrestore(&rport->rport_state_lock, + rport_lock_flag); + + /* 5. Schedule Closing work (Enqueuing workqueue) */ + unf_schedule_closing_work(lport, rport); + + /* 6. decrease R_Port ref_cnt (company with 2) */ + unf_rport_ref_dec(rport); + } + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_lock_flag); +} + +void unf_rport_enter_closing(struct unf_rport_s *v_rport) +{ + /* + * call by + * 1. with RSCN processer + * 2. with LOGOUT processer + ** + * from + * 1. R_Port Link Down + * 2. R_Port enter LOGO + */ + unsigned long rport_lock_flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_lport_s *lport = NULL; + struct unf_disc_s *disc = NULL; + + UNF_CHECK_VALID(0x3059, UNF_TRUE, v_rport, return); + + /* 1. Increase ref_cnt to protect R_Port */ + spin_lock_irqsave(&v_rport->rport_state_lock, rport_lock_flag); + ret = unf_rport_ref_inc(v_rport); + if (ret != RETURN_OK) { + spin_unlock_irqrestore(&v_rport->rport_state_lock, + rport_lock_flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]RPort(0x%x_0x%p) is removing and no need process", + v_rport->nport_id, v_rport); + + return; + } + + /* NOTE: R_Port state has been set(with closing) */ + + lport = v_rport->lport; + spin_unlock_irqrestore(&v_rport->rport_state_lock, rport_lock_flag); + + /* 2. Put R_Port from busy to destroy list */ + disc = &lport->disc; + spin_lock_irqsave(&disc->rport_busy_pool_lock, rport_lock_flag); + list_del_init(&v_rport->entry_rport); + list_add_tail(&v_rport->entry_rport, &disc->list_destroy_rports); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, rport_lock_flag); + + /* 3. Schedule Closing work (Enqueuing workqueue) */ + unf_schedule_closing_work(lport, v_rport); + + /* 4. 
dec R_Port ref_cnt */ + unf_rport_ref_dec(v_rport); +} + +void unf_rport_error_recovery(struct unf_rport_s *v_rport) +{ + unsigned long delay = 0; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3060, UNF_TRUE, v_rport, return); + + spin_lock_irqsave(&v_rport->rport_state_lock, flag); + + ret = unf_rport_ref_inc(v_rport); + if (ret != RETURN_OK) { + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]RPort(0x%x_0x%p) is removing and no need process", + v_rport->nport_id, v_rport); + return; + } + + /* Check R_Port state */ + if ((v_rport->rp_state == UNF_RPORT_ST_CLOSING) || + (v_rport->rp_state == UNF_RPORT_ST_DELETE)) { + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]RPort(0x%x_0x%p) offline and no need process", + v_rport->nport_id, v_rport); + + unf_rport_ref_dec(v_rport); + return; + } + + /* Check repeatability with recovery work */ + if (delayed_work_pending(&v_rport->recovery_work)) { + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]RPort(0x%x_0x%p) recovery work is running and no need process", + v_rport->nport_id, v_rport); + + unf_rport_ref_dec(v_rport); + return; + } + + /* NOTE: Re-login or Logout directly (recovery work) */ + if (v_rport->retries < v_rport->mas_retries) { + v_rport->retries++; + delay = (unsigned long)v_rport->ed_tov; + + if (queue_delayed_work(unf_work_queue, + &v_rport->recovery_work, + (unsigned long)msecs_to_jiffies( + (unsigned int)delay))) { + /* Inc ref_cnt: corresponding to this work timer */ + (void)unf_rport_ref_inc(v_rport); + } + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]RPort(0x%x_0x%p) state(0x%x) retry login failed", + v_rport->nport_id, v_rport, v_rport->rp_state); + + /* Update R_Port state: LOGO event --->>> ST_LOGO */ + unf_rport_state_ma(v_rport, UNF_EVENT_RPORT_LOGO); + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + + /* Enter LOGO processer */ + unf_rport_enter_logo(v_rport->lport, v_rport); + } + + unf_rport_ref_dec(v_rport); +} + +static unsigned int unf_rport_reuse_only(struct unf_rport_s *v_rport) +{ + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3061, UNF_TRUE, + v_rport, return UNF_RETURN_ERROR); + + spin_lock_irqsave(&v_rport->rport_state_lock, flag); + ret = unf_rport_ref_inc(v_rport); + if (ret != RETURN_OK) { + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + + /* R_Port with delete state */ + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]RPort(0x%x_0x%p) is removing and no need process", + v_rport->nport_id, v_rport); + + return UNF_RETURN_ERROR; + } + + /* R_Port State check: delete */ + if ((v_rport->rp_state == UNF_RPORT_ST_DELETE) || + (v_rport->rp_state == UNF_RPORT_ST_CLOSING)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]RPort(0x%x_0x%p) state(0x%x) is delete or closing no need process", + v_rport->nport_id, v_rport, v_rport->rp_state); + + ret = UNF_RETURN_ERROR; + } + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + + unf_rport_ref_dec(v_rport); + + return ret; +} + +static unsigned int unf_rport_reuse_recover(struct unf_rport_s *v_rport) +{ + unsigned long flags = 0; + unsigned int ret = 
UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3062, UNF_TRUE, + v_rport, return UNF_RETURN_ERROR); + + spin_lock_irqsave(&v_rport->rport_state_lock, flags); + ret = unf_rport_ref_inc(v_rport); + if (ret != RETURN_OK) { + spin_unlock_irqrestore(&v_rport->rport_state_lock, flags); + + /* R_Port with delete state */ + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]RPort(0x%x_0x%p) is removing and no need process", + v_rport->nport_id, v_rport); + + return UNF_RETURN_ERROR; + } + + /* R_Port state check: delete */ + if ((v_rport->rp_state == UNF_RPORT_ST_DELETE) || + (v_rport->rp_state == UNF_RPORT_ST_CLOSING)) { + ret = UNF_RETURN_ERROR; + } + + /* Update R_Port state: recovery --->>> ready */ + unf_rport_state_ma(v_rport, UNF_EVENT_RPORT_RECOVERY); + spin_unlock_irqrestore(&v_rport->rport_state_lock, flags); + + unf_rport_ref_dec(v_rport); + + return ret; +} + +static unsigned int unf_rport_reuse_init(struct unf_rport_s *v_rport) +{ + unsigned long flage = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3063, UNF_TRUE, + v_rport, return UNF_RETURN_ERROR); + + spin_lock_irqsave(&v_rport->rport_state_lock, flage); + ret = unf_rport_ref_inc(v_rport); + if (ret != RETURN_OK) { + spin_unlock_irqrestore(&v_rport->rport_state_lock, flage); + + /* R_Port with delete state */ + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]RPort(0x%x_0x%p) is removing and no need process", + v_rport->nport_id, v_rport); + + return UNF_RETURN_ERROR; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]RPort(0x%x)'s state is 0x%x with use_init flag", + v_rport->nport_id, v_rport->rp_state); + + /* R_Port State check: delete */ + if ((v_rport->rp_state == UNF_RPORT_ST_DELETE) || + (v_rport->rp_state == UNF_RPORT_ST_CLOSING)) { + ret = UNF_RETURN_ERROR; + } else { + /* Update R_Port state: re-enter Init state */ + unf_set_rport_state(v_rport, UNF_RPORT_ST_INIT); + } + spin_unlock_irqrestore(&v_rport->rport_state_lock, flage); + + unf_rport_ref_dec(v_rport); + + return ret; +} + +struct unf_rport_s *unf_get_rport_by_nport_id(struct unf_lport_s *v_lport, + unsigned int nport_id) +{ + struct unf_lport_s *lport = NULL; + struct unf_disc_s *disc = NULL; + struct unf_rport_s *rport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flag = 0; + struct unf_rport_s *find_rport = NULL; + + UNF_CHECK_VALID(0x3048, UNF_TRUE, v_lport, return NULL); + lport = (struct unf_lport_s *)v_lport; + disc = &lport->disc; + + /* for each r_port from rport_busy_list: compare N_Port_ID */ + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + list_for_each_safe(node, next_node, &disc->list_busy_rports) { + rport = list_entry(node, struct unf_rport_s, entry_rport); + if (rport && rport->nport_id == nport_id) { + find_rport = rport; + break; + } + } + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + return find_rport; +} + +struct unf_rport_s *unf_get_safe_rport(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + enum unf_rport_reuse_flag_e v_reuse_flag, + unsigned int v_nport_id) +{ + /* + * New add or plug + * + * retry_flogi --->>> reuse_only + * name_server_register --->>> reuse_only + * SNS_plogi --->>> reuse_only + * enter_flogi --->>> reuse_only + * logout --->>> reuse_only + * flogi_handler --->>> reuse_only + * plogi_handler --->>> reuse_only + * adisc_handler --->>> reuse_recovery + * logout_handler --->>> reuse_init + * prlo_handler --->>> reuse_init + * login_with_loop --->>> 
reuse_only + * gffid_callback --->>> reuse_only + * delay_plogi --->>> reuse_only + * gffid_rjt --->>> reuse_only + * gffid_rsp_unknown --->>> reuse_only + * gpnid_acc --->>> reuse_init + * fdisc_callback --->>> reuse_only + * flogi_acc --->>> reuse_only + * plogi_acc --->>> reuse_only + * logo_callback --->>> reuse_init + * rffid_callback --->>> reuse_only + */ +#define UNF_AVOID_LINK_FLASH_TIME 3000 + + struct unf_rport_s *rport = v_rport; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3075, UNF_TRUE, v_lport, return NULL); + + /* 1. Alloc New R_Port or Update R_Port Property */ + if (!rport) { + /* If NULL, get/Alloc new node + * (R_Port from R_Port pool) directly + */ + rport = unf_rport_get_free_and_init(v_lport, UNF_PORT_TYPE_FC, + v_nport_id); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) get exist RPort(0x%x) with state(0x%x) and reuse_flag(0x%x)", + v_lport->port_id, rport->nport_id, + rport->rp_state, v_reuse_flag); + + switch (v_reuse_flag) { + case UNF_RPORT_REUSE_ONLY: + ret = unf_rport_reuse_only(rport); + if (ret != RETURN_OK) { + /* R_Port within delete list: need get new */ + rport = unf_rport_get_free_and_init( + v_lport, + UNF_PORT_TYPE_FC, + v_nport_id); + } + break; + + case UNF_RPORT_REUSE_INIT: + ret = unf_rport_reuse_init(rport); + if (ret != RETURN_OK) { + /* R_Port within delete list: need get new */ + rport = unf_rport_get_free_and_init( + v_lport, + UNF_PORT_TYPE_FC, + v_nport_id); + } + break; + + case UNF_RPORT_REUSE_RECOVER: + ret = unf_rport_reuse_recover(rport); + if (ret != RETURN_OK) { + /* R_Port within delete list, + * NOTE: do nothing + */ + rport = NULL; + } + break; + + default: + break; + } + } + + return rport; +} + +unsigned int unf_get_port_feature(unsigned long long v_wwpn) +{ + struct unf_rport_feature_recard_s *port_fea = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flags = 0; + struct list_head list_temp_node; + + spin_lock_irqsave(&port_fea_pool->port_fea_pool_lock, flags); + list_for_each_safe(node, next_node, &port_fea_pool->list_busy_head) { + port_fea = list_entry(node, struct unf_rport_feature_recard_s, + entry_feature); + + if (v_wwpn == port_fea->wwpn) { + list_del(&port_fea->entry_feature); + list_add(&port_fea->entry_feature, + &port_fea_pool->list_busy_head); + + spin_unlock_irqrestore( + &port_fea_pool->port_fea_pool_lock, flags); + + return port_fea->port_feature; + } + } + + list_for_each_safe(node, next_node, &port_fea_pool->list_free_head) { + port_fea = list_entry(node, struct unf_rport_feature_recard_s, + entry_feature); + + if (v_wwpn == port_fea->wwpn) { + list_del(&port_fea->entry_feature); + list_add(&port_fea->entry_feature, + &port_fea_pool->list_busy_head); + + spin_unlock_irqrestore( + &port_fea_pool->port_fea_pool_lock, flags); + + return port_fea->port_feature; + } + } + + /* can't find wwpn */ + if (list_empty(&port_fea_pool->list_free_head)) { + /* free is empty, transport busy to free */ + list_temp_node = port_fea_pool->list_free_head; + port_fea_pool->list_free_head = port_fea_pool->list_busy_head; + port_fea_pool->list_busy_head = list_temp_node; + } + + port_fea = list_entry((&port_fea_pool->list_free_head)->prev, + struct unf_rport_feature_recard_s, + entry_feature); + list_del(&port_fea->entry_feature); + list_add(&port_fea->entry_feature, &port_fea_pool->list_busy_head); + + port_fea->wwpn = v_wwpn; + port_fea->port_feature = UNF_PORT_MODE_UNKNOWN; + + 
spin_unlock_irqrestore(&port_fea_pool->port_fea_pool_lock, flags); + return UNF_PORT_MODE_UNKNOWN; +} + +void unf_update_port_feature(unsigned long long v_wwpn, + unsigned int v_port_feature) +{ + struct unf_rport_feature_recard_s *port_fea = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flags = 0; + + spin_lock_irqsave(&port_fea_pool->port_fea_pool_lock, flags); + list_for_each_safe(node, next_node, &port_fea_pool->list_busy_head) { + port_fea = list_entry(node, + struct unf_rport_feature_recard_s, + entry_feature); + + if (v_wwpn == port_fea->wwpn) { + port_fea->port_feature = v_port_feature; + list_del(&port_fea->entry_feature); + list_add(&port_fea->entry_feature, + &port_fea_pool->list_busy_head); + + spin_unlock_irqrestore( + &port_fea_pool->port_fea_pool_lock, flags); + + return; + } + } + + list_for_each_safe(node, next_node, &port_fea_pool->list_free_head) { + port_fea = list_entry(node, struct unf_rport_feature_recard_s, + entry_feature); + + if (v_wwpn == port_fea->wwpn) { + port_fea->port_feature = v_port_feature; + list_del(&port_fea->entry_feature); + list_add(&port_fea->entry_feature, + &port_fea_pool->list_busy_head); + + spin_unlock_irqrestore( + &port_fea_pool->port_fea_pool_lock, flags); + + return; + } + } + + spin_unlock_irqrestore(&port_fea_pool->port_fea_pool_lock, flags); +} diff --git a/drivers/scsi/huawei/hifc/unf_rport.h b/drivers/scsi/huawei/hifc/unf_rport.h new file mode 100644 index 000000000000..5e1e6551b94a --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_rport.h @@ -0,0 +1,285 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __UNF_RPORT_H +#define __UNF_RPORT_H + +#define UNF_MAX_SCSI_ID 2048 +#define UNF_LOSE_TMO 30 +#define UNF_RPORT_INVALID_INDEX 0xffff + +/* RSCN compare DISC list with local RPort macro */ +#define UNF_RPORT_NEED_PROCESS 0x1 +#define UNF_RPORT_ONLY_IN_DISC_PROCESS 0x2 +#define UNF_RPORT_ONLY_IN_LOCAL_PROCESS 0x3 +#define UNF_RPORT_IN_DISC_AND_LOCAL_PROCESS 0x4 +#define UNF_RPORT_NOT_NEED_PROCESS 0x5 + +#define UNF_ECHO_SEND_MAX_TIMES 1 + +extern struct unf_rport_feature_pool_s *port_fea_pool; + +enum unf_rport_login_state_e { + UNF_RPORT_ST_INIT = 0x1000, /* initialized */ + UNF_RPORT_ST_PLOGI_WAIT, /* waiting for PLOGI completion */ + UNF_RPORT_ST_PRLI_WAIT, /* waiting for PRLI completion */ + UNF_RPORT_ST_READY, /* ready for use */ + UNF_RPORT_ST_LOGO, /* port logout sent */ + UNF_RPORT_ST_CLOSING, /* being closed */ + UNF_RPORT_ST_DELETE, /* port being deleted */ + UNF_RPORT_ST_BUTT +}; + +enum unf_rport_event_e { + UNF_EVENT_RPORT_NORMAL_ENTER = 0x9000, + UNF_EVENT_RPORT_ENTER_PLOGI = 0x9001, + UNF_EVENT_RPORT_ENTER_PRLI = 0x9002, + UNF_EVENT_RPORT_READY = 0x9003, + UNF_EVENT_RPORT_LOGO = 0x9004, + UNF_EVENT_RPORT_CLS_TIMEOUT = 0x9005, + UNF_EVENT_RPORT_RECOVERY = 0x9006, + UNF_EVENT_RPORT_RELOGIN = 0x9007, + UNF_EVENT_RPORT_LINK_DOWN = 0x9008, + UNF_EVENT_RPORT_BUTT +}; + +/* RPort local link state */ +enum unf_port_state_e { + UNF_PORT_STATE_LINKUP = 0x1001, + UNF_PORT_STATE_LINKDOWN = 0x1002 +}; + +enum unf_rport_reuse_flag_e { + UNF_RPORT_REUSE_ONLY = 0x1001, + UNF_RPORT_REUSE_INIT = 0x1002, + UNF_RPORT_REUSE_RECOVER = 0x1003 +}; + +struct unf_disc_rport_s { + /* RPort entry */ + struct list_head entry_rport; + + unsigned int nport_id; /* Remote port NPortID */ + unsigned int disc_done; /* 1:Disc done */ +}; + +struct unf_rport_feature_pool_s { + struct list_head list_busy_head; + 
struct list_head list_free_head; + void *p_port_feature_pool_addr; + spinlock_t port_fea_pool_lock; +}; + +struct unf_rport_feature_recard_s { + struct list_head entry_feature; + unsigned long long wwpn; + unsigned int port_feature; + unsigned int reserved; +}; + +struct unf_os_thread_private_data_s { + struct list_head list; + spinlock_t spin_lock; + struct task_struct *thread; + unsigned int in_process; + unsigned int cpu_id; + atomic_t user_count; +}; + +/* Remote Port struct */ +struct unf_rport_s { + unsigned int max_frame_size; + unsigned int supported_classes; + + /* Dynamic Attributes */ + /* Remote Port loss timeout in seconds. */ + unsigned int dev_loss_tmo; + + unsigned long long node_name; + unsigned long long port_name; + unsigned int nport_id; /* Remote port NPortID */ + unsigned int local_nport_id; + + unsigned int roles; + + /* Remote port local INI state */ + enum unf_port_state_e lport_ini_state; + enum unf_port_state_e last_lport_ini_state; + + /* Remote port local TGT state */ + enum unf_port_state_e lport_tgt_state; + enum unf_port_state_e last_lport_tgt_state; + + /* Port Type:fc */ + unsigned int port_type; + + /* RPort reference counter */ + atomic_t rport_ref_cnt; + + /* Pending IO count */ + atomic_t pending_io_cnt; + + /* RPort entry */ + struct list_head entry_rport; + + /* Port State,delay reclaim when uiRpState == complete. */ + enum unf_rport_login_state_e rp_state; + unsigned int disc_done; /* 1:Disc done */ + + struct unf_lport_s *lport; + void *rport; + spinlock_t rport_state_lock; + + /* Port attribution */ + unsigned int ed_tov; + unsigned int ra_tov; + unsigned int options; /* ini or tgt */ + unsigned int last_report_linkup_options; + unsigned int fcp_conf_needed; /* INI Rport send FCP CONF flag */ + unsigned int tape_support_needed; /* INI tape support flag */ + unsigned int retries; /* special req retry times */ + unsigned int logo_retries; /* logo error recovery retry times */ + unsigned int mas_retries; /* special req retry times */ + /* Rport alloc jiffies */ + unsigned long long rport_alloc_jifs; + + void *session; + + /* binding with SCSI */ + unsigned int scsi_id; + + /* disc list compare flag */ + unsigned int rscn_position; + + unsigned int rport_index; + + /* RPort timer,closing status */ + struct work_struct closing_work; + + /* RPort timer,rport linkup */ + struct work_struct start_work; + + /* RPort timer,recovery */ + struct delayed_work recovery_work; + + /* RPort timer,TGT mode,PRLI waiting */ + struct delayed_work open_work; + + struct semaphore task_sema; + /* Callback after rport Ready/delete.[with state:ok/fail]. 
+ * Creat/free TGT session here + * input : L_Port,R_Port,state:ready + * --creat session/delete--free session + */ + void (*pfn_unf_rport_call_back)(void *, void *, unsigned int); + + struct unf_os_thread_private_data_s *data_thread; +}; + +#define UNF_IO_RESULT_CNT(v_scsi_table, v_scsi_id, v_io_result) \ + do { \ + if (likely(((v_io_result) < UNF_MAX_IO_RETURN_VALUE) && \ + ((v_scsi_id) < UNF_MAX_SCSI_ID) && \ + ((v_scsi_table)->wwn_rport_info_table) && \ + (v_scsi_table->wwn_rport_info_table[v_scsi_id].dfx_counter))) { \ + atomic64_inc(&v_scsi_table->wwn_rport_info_table[v_scsi_id].dfx_counter->io_done_cnt[v_io_result]); \ + } else { \ + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, \ + UNF_LOG_EQUIP_ATT, UNF_ERR, \ + "[err] io return value(0x%x) or scsi_id(0x%x) is invalid", \ + v_io_result, v_scsi_id); \ + } \ + } while (0) + +#define UNF_SCSI_CMD_CNT(v_scsi_table, v_scsi_id, v_io_type) \ + do { \ + if (likely(((v_io_type) < UNF_MAX_SCSI_CMD) && \ + ((v_scsi_id) < UNF_MAX_SCSI_ID) && \ + ((v_scsi_table)->wwn_rport_info_table) && \ + (v_scsi_table->wwn_rport_info_table[v_scsi_id].dfx_counter))) { \ + atomic64_inc(&((v_scsi_table->wwn_rport_info_table[v_scsi_id]).dfx_counter->scsi_cmd_cnt[v_io_type])); \ + } else { \ + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, \ + UNF_LOG_EQUIP_ATT, UNF_ERR, \ + "[err] scsi_cmd(0x%x) or scsi_id(0x%x) is invalid", \ + v_io_type, v_scsi_id); \ + } \ + } while (0) + +#define UNF_SCSI_ERROR_HANDLE_CNT(v_scsi_table, v_scsi_id, v_io_type) \ + do { \ + if (likely(((v_io_type) < UNF_SCSI_ERROR_HANDLE_BUTT) && \ + ((v_scsi_id) < UNF_MAX_SCSI_ID) && \ + ((v_scsi_table)->wwn_rport_info_table) && \ + (v_scsi_table->wwn_rport_info_table[v_scsi_id].dfx_counter))) { \ + atomic_inc(&v_scsi_table->wwn_rport_info_table[v_scsi_id].dfx_counter->error_handle[v_io_type]); \ + } else { \ + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, \ + UNF_LOG_EQUIP_ATT, UNF_ERR, \ + "[err] scsi_cmd(0x%x) or scsi_id(0x%x) is invalid", \ + v_io_type, v_scsi_id); \ + } \ + } while (0) + +#define UNF_SCSI_ERROR_HANDLE_RESULT_CNT(v_scsi_table, v_scsi_id, v_io_type) \ + do { \ + if (likely(((v_io_type) < UNF_SCSI_ERROR_HANDLE_BUTT) && \ + ((v_scsi_id) < UNF_MAX_SCSI_ID) && \ + ((v_scsi_table)->wwn_rport_info_table) && \ + (v_scsi_table->wwn_rport_info_table[v_scsi_id].dfx_counter))) { \ + atomic_inc(&v_scsi_table->wwn_rport_info_table[v_scsi_id].dfx_counter->error_handle_result[v_io_type]); \ + } else { \ + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, \ + UNF_LOG_EQUIP_ATT, UNF_ERR, \ + "[err] scsi_cmd(0x%x) or scsi_id(0x%x) is invalid", \ + v_io_type, v_scsi_id); \ + } \ + } while (0) + +void unf_rport_state_ma(struct unf_rport_s *v_rport, + enum unf_rport_event_e v_event); +void unf_update_lport_state_by_linkup_event(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int rport_att); +void unf_rport_enter_closing(struct unf_rport_s *v_rport); +void unf_clean_linkdown_rport(struct unf_lport_s *v_lport); +void unf_rport_error_recovery(struct unf_rport_s *v_rport); +struct unf_rport_s *unf_get_rport_by_nport_id(struct unf_lport_s *v_lport, + unsigned int nport_id); +void unf_rport_enter_logo(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_rport_ref_inc(struct unf_rport_s *v_rport); +void unf_rport_ref_dec(struct unf_rport_s *v_rport); + +struct unf_rport_s *unf_rport_set_qualifier_key_reuse( + struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport_by_nport_id, + struct unf_rport_s *v_rport_by_wwpn, + unsigned long long v_wwpn, + unsigned int v_sid); +void unf_rport_delay_login(struct 
unf_rport_s *v_rport); +struct unf_rport_s *unf_find_valid_rport(struct unf_lport_s *v_lport, + unsigned long long v_wwpn, + unsigned int v_sid); +void unf_rport_linkdown(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +struct unf_rport_s *unf_get_safe_rport(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + enum unf_rport_reuse_flag_e v_reuse_flag, + unsigned int v_nport_id); +void *unf_rport_get_free_and_init(void *v_lport, + unsigned int v_port_type, + unsigned int v_nport_id); +unsigned int unf_free_scsi_id(struct unf_lport_s *v_lport, + unsigned int v_scsi_id); +void unf_schedule_closing_work(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +void unf_sesion_loss_timeout(struct work_struct *v_work); +unsigned int unf_get_port_feature(unsigned long long v_wwpn); +void unf_update_port_feature(unsigned long long v_wwpn, + unsigned int v_port_feature); + +#endif +
From: Chenguangli chenguangli2@huawei.com
driver inclusion category: feature bugzilla: NA
-----------------------------------------------------------------------
This module is mainly used to process I/O commands delivered from the SCSI layer and to handle the I/O exception (error recovery) mechanism.
Signed-off-by: Chenguangli chenguangli2@huawei.com Reviewed-by: Zengweiliang zengweiliang.zengweiliang@huawei.com Acked-by: Hanjun Guo guohanjun@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/scsi/huawei/hifc/hifc_io.c | 1243 ++++++++++++ drivers/scsi/huawei/hifc/hifc_io.h | 66 + drivers/scsi/huawei/hifc/hifc_wqe.c | 667 +++++++ drivers/scsi/huawei/hifc/hifc_wqe.h | 486 +++++ .../scsi/huawei/hifc/hifcoe_parent_context.h | 414 ++++ drivers/scsi/huawei/hifc/hifcoe_wqe.h | 1698 +++++++++++++++++ drivers/scsi/huawei/hifc/unf_io.c | 1338 +++++++++++++ drivers/scsi/huawei/hifc/unf_io.h | 90 + drivers/scsi/huawei/hifc/unf_io_abnormal.c | 926 +++++++++ drivers/scsi/huawei/hifc/unf_io_abnormal.h | 16 + 10 files changed, 6944 insertions(+) create mode 100644 drivers/scsi/huawei/hifc/hifc_io.c create mode 100644 drivers/scsi/huawei/hifc/hifc_io.h create mode 100644 drivers/scsi/huawei/hifc/hifc_wqe.c create mode 100644 drivers/scsi/huawei/hifc/hifc_wqe.h create mode 100644 drivers/scsi/huawei/hifc/hifcoe_parent_context.h create mode 100644 drivers/scsi/huawei/hifc/hifcoe_wqe.h create mode 100644 drivers/scsi/huawei/hifc/unf_io.c create mode 100644 drivers/scsi/huawei/hifc/unf_io.h create mode 100644 drivers/scsi/huawei/hifc/unf_io_abnormal.c create mode 100644 drivers/scsi/huawei/hifc/unf_io_abnormal.h
diff --git a/drivers/scsi/huawei/hifc/hifc_io.c b/drivers/scsi/huawei/hifc/hifc_io.c new file mode 100644 index 000000000000..cd1c47fa7df2 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_io.c @@ -0,0 +1,1243 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#include "hifc_module.h" +#include "hifc_hba.h" +#include "hifc_service.h" +#include "hifc_io.h" + +/* Whether to enable the payload printing + * function depends on the content of exchange + */ +#ifdef HIFC_PRINT_PAYLOADINFO_ENABLE +#include "unf_exchg.h" +#endif + +/* Set this parameter based on EDTOV 2S */ +#define HIFC_IMMIDATA_ABORT_TIME 2000 +#define hifc_fill_pkg_status(com_err_code, control, scsi_status) \ + (((unsigned int)(com_err_code) << 16) |\ + ((unsigned int)(control) << 8) |\ + (unsigned int)(scsi_status)) + +unsigned int dif_protect_op_code = INVALID_VALUE32; +unsigned int dif_app_esc_check = HIFC_DIF_APP_REF_ESC_CHECK; +unsigned int dif_ref_esc_check = HIFC_DIF_APP_REF_ESC_CHECK; +unsigned int dif_sect_size; +unsigned int no_dif_sect_size; +unsigned int dix_flag; +unsigned int grd_ctrl; +unsigned int grd_agm_ctrl = HIFC_DIF_GUARD_VERIFY_ALGORITHM_CTL_T10_CRC16; +unsigned int cmp_app_tag_mask = 0xffff; +unsigned int ref_tag_mod = INVALID_VALUE32; +unsigned int rep_ref_tag; +unsigned short cmp_app_tag; +unsigned short rep_app_tag; + +static void hifc_dif_err_count(struct hifc_hba_s *v_hba, + unsigned char v_dif_info) +{ + unsigned char dif_info = v_dif_info; + + HIFC_DIF_ERR_STAT(v_hba, HIFC_DIF_RECV_DIFERR_ALL); + + if (dif_info & HIFC_DIF_ERROR_CODE_CRC) + HIFC_DIF_ERR_STAT(v_hba, HIFC_DIF_RECV_DIFERR_CRC); + + if (dif_info & HIFC_DIF_ERROR_CODE_APP) + HIFC_DIF_ERR_STAT(v_hba, HIFC_DIF_RECV_DIFERR_APP); + + if (dif_info & HIFC_DIF_ERROR_CODE_REF) + HIFC_DIF_ERR_STAT(v_hba, HIFC_DIF_RECV_DIFERR_REF); +} + +static void hifc_build_no_dif_control(struct unf_frame_pkg_s *v_pkg, + struct hifcoe_fc_dif_info_s *v_dif_info) +{ + struct hifcoe_fc_dif_info_s *dif_info = v_dif_info; + + /* dif enable or disable */ + dif_info->wd0.difx_en = HIFC_DIF_DISABLE; + + dif_info->wd1.vpid = v_pkg->qos_level; + dif_info->wd1.lun_qos_en = 0; +} + +void hifc_dif_action_forward(struct hifcoe_fc_dif_info_s *v_dif_info_l1, + struct unf_dif_control_info_s *v_dif_ctrl_u1) +{ + v_dif_info_l1->wd0.grd_ctrl |= + (v_dif_ctrl_u1->protect_opcode & UNF_VERIFY_CRC_MASK) ? + HIFC_DIF_GARD_REF_APP_CTRL_VERIFY : + HIFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; + + v_dif_info_l1->wd0.grd_ctrl |= (v_dif_ctrl_u1->protect_opcode & + UNF_REPLACE_CRC_MASK) ? HIFC_DIF_GARD_REF_APP_CTRL_REPLACE : + HIFC_DIF_GARD_REF_APP_CTRL_FORWARD; + + v_dif_info_l1->wd0.ref_tag_ctrl |= + (v_dif_ctrl_u1->protect_opcode & UNF_VERIFY_LBA_MASK) ? + HIFC_DIF_GARD_REF_APP_CTRL_VERIFY : + HIFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; + + v_dif_info_l1->wd0.ref_tag_ctrl |= + (v_dif_ctrl_u1->protect_opcode & UNF_REPLACE_LBA_MASK) ? + HIFC_DIF_GARD_REF_APP_CTRL_REPLACE : HIFC_DIF_GARD_REF_APP_CTRL_FORWARD; + + v_dif_info_l1->wd1.app_tag_ctrl |= (v_dif_ctrl_u1->protect_opcode & + UNF_VERIFY_APP_MASK) ? HIFC_DIF_GARD_REF_APP_CTRL_VERIFY : + HIFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; + + v_dif_info_l1->wd1.app_tag_ctrl |= + (v_dif_ctrl_u1->protect_opcode & UNF_REPLACE_APP_MASK) ? 
+ HIFC_DIF_GARD_REF_APP_CTRL_REPLACE : HIFC_DIF_GARD_REF_APP_CTRL_FORWARD; +} + +void hifc_dif_action_delete(struct hifcoe_fc_dif_info_s *v_dif_info_l1, + struct unf_dif_control_info_s *v_dif_ctrl_u1) +{ + v_dif_info_l1->wd0.grd_ctrl |= + (v_dif_ctrl_u1->protect_opcode & UNF_VERIFY_CRC_MASK) ? + HIFC_DIF_GARD_REF_APP_CTRL_VERIFY : + HIFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; + v_dif_info_l1->wd0.grd_ctrl |= HIFC_DIF_GARD_REF_APP_CTRL_DELETE; + + v_dif_info_l1->wd0.ref_tag_ctrl |= + (v_dif_ctrl_u1->protect_opcode & UNF_VERIFY_LBA_MASK) ? + HIFC_DIF_GARD_REF_APP_CTRL_VERIFY : + HIFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; + v_dif_info_l1->wd0.ref_tag_ctrl |= HIFC_DIF_GARD_REF_APP_CTRL_DELETE; + + v_dif_info_l1->wd1.app_tag_ctrl |= + (v_dif_ctrl_u1->protect_opcode & UNF_VERIFY_APP_MASK) ? + HIFC_DIF_GARD_REF_APP_CTRL_VERIFY : + HIFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; + v_dif_info_l1->wd1.app_tag_ctrl |= HIFC_DIF_GARD_REF_APP_CTRL_DELETE; +} + + +static void hifc_convert_dif_action( + struct unf_dif_control_info_s *v_dif_ctrl_u1, + struct hifcoe_fc_dif_info_s *v_dif_info_l1) +{ + struct hifcoe_fc_dif_info_s *dif_info_l1 = NULL; + struct unf_dif_control_info_s *dif_ctrl_u1 = NULL; + + dif_info_l1 = v_dif_info_l1; + dif_ctrl_u1 = v_dif_ctrl_u1; + + switch (UNF_DIF_ACTION_MASK & dif_ctrl_u1->protect_opcode) { + case UNF_DIF_ACTION_VERIFY_AND_REPLACE: + case UNF_DIF_ACTION_VERIFY_AND_FORWARD: + hifc_dif_action_forward(dif_info_l1, dif_ctrl_u1); + break; + + case UNF_DIF_ACTION_INSERT: + dif_info_l1->wd0.grd_ctrl |= + HIFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; + dif_info_l1->wd0.grd_ctrl |= + HIFC_DIF_GARD_REF_APP_CTRL_INSERT; + dif_info_l1->wd0.ref_tag_ctrl |= + HIFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; + dif_info_l1->wd0.ref_tag_ctrl |= + HIFC_DIF_GARD_REF_APP_CTRL_INSERT; + dif_info_l1->wd1.app_tag_ctrl |= + HIFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; + dif_info_l1->wd1.app_tag_ctrl |= + HIFC_DIF_GARD_REF_APP_CTRL_INSERT; + break; + + case UNF_DIF_ACTION_VERIFY_AND_DELETE: + hifc_dif_action_delete(dif_info_l1, dif_ctrl_u1); + break; + + default: + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "Unknown dif protect opcode 0x%x", + dif_ctrl_u1->protect_opcode); + break; + } +} + +void hifc_get_dif_info_l1(struct hifcoe_fc_dif_info_s *v_dif_info_l1, + struct unf_dif_control_info_s *v_dif_ctrl_u1) +{ + v_dif_info_l1->wd1.cmp_app_tag_msk = cmp_app_tag_mask; + + v_dif_info_l1->rep_app_tag = v_dif_ctrl_u1->app_tag; + v_dif_info_l1->rep_ref_tag = v_dif_ctrl_u1->start_lba; + + v_dif_info_l1->cmp_app_tag = v_dif_ctrl_u1->app_tag; + v_dif_info_l1->cmp_ref_tag = v_dif_ctrl_u1->start_lba; + + if (cmp_app_tag != 0) + v_dif_info_l1->cmp_app_tag = cmp_app_tag; + + if (rep_app_tag != 0) + v_dif_info_l1->rep_app_tag = rep_app_tag; + + if (rep_ref_tag != 0) + v_dif_info_l1->rep_ref_tag = rep_ref_tag; +} + +static void hifc_build_dif_control(struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + struct hifcoe_fc_dif_info_s *v_dif_info_l1) +{ + struct hifcoe_fc_dif_info_s *dif_info_l1 = NULL; + struct unf_dif_control_info_s *dif_ctrl_u1 = NULL; + + dif_info_l1 = v_dif_info_l1; + dif_ctrl_u1 = &v_pkg->dif_control; + + /* dif enable or disable */ + dif_info_l1->wd0.difx_en = HIFC_DIF_ENABLE; + + dif_info_l1->wd1.vpid = v_pkg->qos_level; + dif_info_l1->wd1.lun_qos_en = 0; + + /* 512B + 8 size mode */ + dif_info_l1->wd0.sct_size = + (dif_ctrl_u1->flags & UNF_DIF_SECTSIZE_4KB) ? + HIFC_DIF_SECTOR_4KB_MODE : HIFC_DIF_SECTOR_512B_MODE; + + no_dif_sect_size = (dif_ctrl_u1->flags & UNF_DIF_SECTSIZE_4KB) ? 
+ HIFC_SECT_SIZE_4096 : HIFC_SECT_SIZE_512; + + dif_sect_size = (dif_ctrl_u1->flags & UNF_DIF_SECTSIZE_4KB) ? + HIFC_SECT_SIZE_4096_8 : HIFC_SECT_SIZE_512_8; + + /* The length is adjusted when the burst len is adjusted. + * The length is initialized to 0 + */ + dif_info_l1->wd0.difx_len = 0; + + /* dif type 1 */ + dif_info_l1->wd0.dif_verify_type = dif_type; + dif_info_l1->wd0.dif_ins_rep_type = dif_type; + + /* Check whether the 0xffff app or ref domain is isolated + * If all ff messages are displayed in type1 app, checkcheck sector + * v_dif_info_l1->wd0.difx_app_esc = HIFC_DIF_APP_REF_ESC_CHECK + */ + + dif_info_l1->wd0.difx_app_esc = dif_app_esc_check; + + /* type1 ref tag If all ff is displayed, check sector is required */ + dif_info_l1->wd0.difx_ref_esc = dif_ref_esc_check; + + /* Currently, only t10 crc is supported */ + dif_info_l1->wd0.grd_agm_ctrl = 0; + + /* Set this parameter based on the values of bit zero and bit one. + * The initial value is 0, and the value is UNF_DEFAULT_CRC_GUARD_SEED + */ + dif_info_l1->wd0.grd_agm_ini_ctrl = + HIFC_DIF_CRC_CS_INITIAL_CONFIG_BY_BIT0_1; + dif_info_l1->wd1.app_tag_ctrl = 0; + dif_info_l1->wd0.grd_ctrl = 0; + dif_info_l1->wd0.ref_tag_ctrl = 0; + + /* Convert the verify operation, replace, forward, insert, + * and delete operations based on the actual operation code of + * the upper layer + */ + if (dif_protect_op_code != INVALID_VALUE32) { + dif_ctrl_u1->protect_opcode = dif_protect_op_code | + (dif_ctrl_u1->protect_opcode & UNF_DIF_ACTION_MASK); + } + + hifc_convert_dif_action(dif_ctrl_u1, dif_info_l1); + + /* Address self-increase mode */ + dif_info_l1->wd0.ref_tag_mode = (dif_ctrl_u1->protect_opcode & + UNF_DIF_ACTION_NO_INCREASE_REFTAG) ? (BOTH_NONE) : (BOTH_INCREASE); + + if (ref_tag_mod != INVALID_VALUE32) + dif_info_l1->wd0.ref_tag_mode = ref_tag_mod; + + /* This parameter is used only when type 3 is set to 0xffff. */ + + hifc_get_dif_info_l1(dif_info_l1, dif_ctrl_u1); +} + +static unsigned int hifc_fill_external_sgl_page( + struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + struct unf_esgl_page_s *v_esgl_page, + unsigned int sge_num, + int v_direct, + unsigned int context_id, + unsigned int dif_flag) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int index = 0; + unsigned int sge_num_per_page = 0; + unsigned int buffer_addr = 0; + unsigned int buf_len = 0; + char *buf = NULL; + unsigned long phys = 0; + struct unf_esgl_page_s *esgl_page = NULL; + struct hifcoe_variable_sge_s *sge = NULL; + + esgl_page = v_esgl_page; + while (sge_num > 0) { + /* Obtains the initial address of the sge page */ + sge = (struct hifcoe_variable_sge_s *)esgl_page->page_address; + + /* Calculate the number of sge on each page */ + sge_num_per_page = (esgl_page->page_size) / + sizeof(struct hifcoe_variable_sge_s); + + /* Fill in sgl page. 
The last sge of each page is link sge + * by default + */ + for (index = 0; index < (sge_num_per_page - 1); index++) { + UNF_GET_SGL_ENTRY(ret, (void *)v_pkg, &buf, + &buf_len, dif_flag); + if (ret != RETURN_OK) + return UNF_RETURN_ERROR; + phys = (unsigned long)buf; + sge[index].buf_addr_hi = UNF_DMA_HI32(phys); + sge[index].buf_addr_lo = UNF_DMA_LO32(phys); + sge[index].wd0.buf_len = buf_len; + sge[index].wd0.r_flag = 0; + sge[index].wd1.extension_flag = + HIFC_WQE_SGE_NOT_EXTEND_FLAG; + sge[index].wd1.last_flag = HIFC_WQE_SGE_NOT_LAST_FLAG; + + /* parity bit */ + sge[index].wd1.buf_addr_gpa = + (sge[index].buf_addr_lo >> 16); + sge[index].wd1.xid = (context_id & 0x3fff); + + hifc_cpu_to_big32(&sge[index], + sizeof(struct hifcoe_variable_sge_s)); + + sge_num--; + if (sge_num == 0) + break; + } + + /* sge Set the end flag on the last sge of the page if all the + * pages have been filled. + */ + if (sge_num == 0) { + sge[index].wd1.extension_flag = + HIFC_WQE_SGE_NOT_EXTEND_FLAG; + sge[index].wd1.last_flag = HIFC_WQE_SGE_LAST_FLAG; + + /* parity bit */ + buffer_addr = be32_to_cpu(sge[index].buf_addr_lo); + sge[index].wd1.buf_addr_gpa = (buffer_addr >> 16); + sge[index].wd1.xid = (context_id & 0x3fff); + + hifc_cpu_to_big32(&sge[index].wd1, HIFC_DWORD_BYTE); + } + /* If only one sge is left empty, the sge reserved on the page + * is used for filling. + */ + else if (sge_num == 1) { + UNF_GET_SGL_ENTRY(ret, (void *)v_pkg, &buf, + &buf_len, dif_flag); + if (ret != RETURN_OK) + return UNF_RETURN_ERROR; + + phys = (unsigned long)buf; + sge[index].buf_addr_hi = UNF_DMA_HI32(phys); + sge[index].buf_addr_lo = UNF_DMA_LO32(phys); + sge[index].wd0.buf_len = buf_len; + sge[index].wd0.r_flag = 0; + sge[index].wd1.extension_flag = + HIFC_WQE_SGE_NOT_EXTEND_FLAG; + sge[index].wd1.last_flag = HIFC_WQE_SGE_LAST_FLAG; + + /* parity bit */ + sge[index].wd1.buf_addr_gpa = + (sge[index].buf_addr_lo >> 16); + sge[index].wd1.xid = (context_id & 0x3fff); + + hifc_cpu_to_big32(&sge[index], + sizeof(struct hifcoe_variable_sge_s)); + + sge_num--; + } else { + /* Apply for a new sgl page and fill in link sge */ + UNF_GET_FREE_ESGL_PAGE(esgl_page, v_hba->lport, v_pkg); + if (!esgl_page) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_REG_ATT, UNF_ERR, "Get free esgl page failed."); + return UNF_RETURN_ERROR; + } + phys = esgl_page->esgl_phyaddr; + sge[index].buf_addr_hi = UNF_DMA_HI32(phys); + sge[index].buf_addr_lo = UNF_DMA_LO32(phys); + + /* For the cascaded wqe, you only need to enter the + * cascading buffer address and extension flag, and do + * not need to fill in other fields + */ + sge[index].wd0.buf_len = 0; + sge[index].wd0.r_flag = 0; + sge[index].wd1.extension_flag = + HIFC_WQE_SGE_EXTEND_FLAG; + sge[index].wd1.last_flag = HIFC_WQE_SGE_NOT_LAST_FLAG; + + /* parity bit */ + sge[index].wd1.buf_addr_gpa = + (sge[index].buf_addr_lo >> 16); + sge[index].wd1.xid = (context_id & 0x3fff); + + hifc_cpu_to_big32(&sge[index], + sizeof(struct hifcoe_variable_sge_s)); + } + + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_INFO, + "Port(0x%x) SID(0x%x) DID(0x%x) RXID(0x%x) build esgl left sge num: %u.", + v_hba->port_cfg.port_id, + v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did, + v_pkg->frame_head.oxid_rxid, + sge_num); + } + + return RETURN_OK; +} + +static unsigned int hifc_build_local_dif_sgl(struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + struct hifcoe_sqe_s *v_sqe, + int v_direct, + unsigned int v_bd_sge_num) +{ + unsigned int ret = UNF_RETURN_ERROR; + char *buf = NULL; + unsigned int 
buf_len = 0; + unsigned long phys = 0; + unsigned int dif_sge_place = 0; + struct hifc_parent_sq_info_s *parent_sq = NULL; + + parent_sq = hifc_find_parent_sq_by_pkg((void *)v_hba, v_pkg); + if (unlikely(!parent_sq)) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "Port(0x%x) send packet oxid_rxid(0x%x) fail, as sid_did(0x%x_0x%x)'s parent sq is null.", + v_hba->port_cfg.port_id, + v_pkg->frame_head.oxid_rxid, + v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return UNF_RETURN_ERROR; + } + + /* DIF SGE must be followed by BD SGE */ + dif_sge_place = ((v_bd_sge_num <= v_pkg->entry_count) ? + v_bd_sge_num : v_pkg->entry_count); + + /* The entry_count= 0 needs to be specially processed and does not + * need to be mounted. As long as len is set to zero, Last-bit is set + * to one, and E-bit is set to 0. + */ + if (v_pkg->dif_control.dif_sge_count == 0) { + v_sqe->sge[dif_sge_place].buf_addr_hi = 0; + v_sqe->sge[dif_sge_place].buf_addr_lo = 0; + v_sqe->sge[dif_sge_place].wd0.buf_len = 0; + } else { + UNF_CM_GET_DIF_SGL_ENTRY(ret, (void *)v_pkg, &buf, &buf_len); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, "DOUBLE DIF Get Dif Buf Fail."); + return UNF_RETURN_ERROR; + } + + phys = (unsigned long)buf; + v_sqe->sge[dif_sge_place].buf_addr_hi = UNF_DMA_HI32(phys); + v_sqe->sge[dif_sge_place].buf_addr_lo = UNF_DMA_LO32(phys); + v_sqe->sge[dif_sge_place].wd0.buf_len = buf_len; + } + + /* rdma flag. If the fc is not used, enter 0. */ + v_sqe->sge[dif_sge_place].wd0.r_flag = 0; + + /* parity bit */ + v_sqe->sge[dif_sge_place].wd1.buf_addr_gpa = + (v_sqe->sge[dif_sge_place].buf_addr_lo >> 16); + v_sqe->sge[dif_sge_place].wd1.xid = (parent_sq->context_id & 0x3fff); + + /* The local sgl does not use the cascading SGE. Therefore, the value + * of this field is always 0. + */ + v_sqe->sge[dif_sge_place].wd1.extension_flag = + HIFC_WQE_SGE_NOT_EXTEND_FLAG; + v_sqe->sge[dif_sge_place].wd1.last_flag = HIFC_WQE_SGE_LAST_FLAG; + + hifc_cpu_to_big32(&v_sqe->sge[dif_sge_place], + sizeof(struct hifcoe_variable_sge_s)); + + return RETURN_OK; +} + +static unsigned int hifc_build_external_dif_sgl(struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + struct hifcoe_sqe_s *v_sqe, + int v_direct, + unsigned int v_bd_sge_num) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct unf_esgl_page_s *esgl_page = NULL; + unsigned long phys = 0; + unsigned int left_sge_num = 0; + unsigned int dif_sge_place = 0; + struct hifc_parent_sq_info_s *parent_sq = NULL; + + parent_sq = hifc_find_parent_sq_by_pkg((void *)v_hba, v_pkg); + if (unlikely(!parent_sq)) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "Port(0x%x) send packet oxid_rxid(0x%x) fail, as sid_did(0x%x_0x%x)'s parent sq is null.", + v_hba->port_cfg.port_id, + v_pkg->frame_head.oxid_rxid, + v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return UNF_RETURN_ERROR; + } + + /* DIF SGE must be followed by BD SGE */ + dif_sge_place = ((v_bd_sge_num <= v_pkg->entry_count) ? 
+ v_bd_sge_num : v_pkg->entry_count); + + /* Allocate the first page first */ + UNF_GET_FREE_ESGL_PAGE(esgl_page, v_hba->lport, v_pkg); + if (!esgl_page) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, "DOUBLE DIF Get External Page Fail."); + return UNF_RETURN_ERROR; + } + + phys = esgl_page->esgl_phyaddr; + + /* Configuring the Address of the Cascading Page */ + v_sqe->sge[dif_sge_place].buf_addr_hi = UNF_DMA_HI32(phys); + v_sqe->sge[dif_sge_place].buf_addr_lo = UNF_DMA_LO32(phys); + + /* Configuring Control Information About the Cascading Page */ + v_sqe->sge[dif_sge_place].wd0.buf_len = 0; + v_sqe->sge[dif_sge_place].wd0.r_flag = 0; + v_sqe->sge[dif_sge_place].wd1.extension_flag = HIFC_WQE_SGE_EXTEND_FLAG; + v_sqe->sge[dif_sge_place].wd1.last_flag = HIFC_WQE_SGE_NOT_LAST_FLAG; + + /* parity bit */ + v_sqe->sge[dif_sge_place].wd1.buf_addr_gpa = + (v_sqe->sge[dif_sge_place].buf_addr_lo >> 16); + v_sqe->sge[dif_sge_place].wd1.xid = (parent_sq->context_id & 0x3fff); + + hifc_cpu_to_big32(&v_sqe->sge[dif_sge_place], + sizeof(struct hifcoe_variable_sge_s)); + + /* Fill in the sge information on the cascading page */ + left_sge_num = v_pkg->dif_control.dif_sge_count; + ret = hifc_fill_external_sgl_page(v_hba, v_pkg, esgl_page, left_sge_num, + v_direct, parent_sq->context_id, + UNF_TRUE); + if (ret != RETURN_OK) + return UNF_RETURN_ERROR; + + return RETURN_OK; +} + +static unsigned int hifc_build_local_sgl(struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + struct hifcoe_sqe_s *v_sqe, + int v_direct) +{ + unsigned int ret = UNF_RETURN_ERROR; + char *buf = NULL; + unsigned int buf_len = 0; + unsigned int index = 0; + unsigned long phys = 0; + struct hifc_parent_sq_info_s *parent_sq = NULL; + + parent_sq = hifc_find_parent_sq_by_pkg((void *)v_hba, v_pkg); + if (unlikely(!parent_sq)) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[fail]Port(0x%x) send packet oxid_rxid(0x%x) fail, as sid_did(0x%x_0x%x)'s parent sq is null.", + v_hba->port_cfg.port_id, + v_pkg->frame_head.oxid_rxid, + v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return UNF_RETURN_ERROR; + } + + for (index = 0; index < v_pkg->entry_count; index++) { + UNF_CM_GET_SGL_ENTRY(ret, (void *)v_pkg, &buf, &buf_len); + + if (ret != RETURN_OK) + return UNF_RETURN_ERROR; + + phys = (unsigned long)buf; + + v_sqe->sge[index].buf_addr_hi = UNF_DMA_HI32(phys); + v_sqe->sge[index].buf_addr_lo = UNF_DMA_LO32(phys); + v_sqe->sge[index].wd0.buf_len = buf_len; + + /* rdma flag. If the fc is not used, enter 0. */ + v_sqe->sge[index].wd0.r_flag = 0; + + /* parity bit */ + v_sqe->sge[index].wd1.buf_addr_gpa = + (v_sqe->sge[index].buf_addr_lo >> 16); + v_sqe->sge[index].wd1.xid = (parent_sq->context_id & 0x3fff); + + /* The local sgl does not use the cascading SGE. Therefore, the + * value of this field is always 0. + */ + v_sqe->sge[index].wd1.extension_flag = + HIFC_WQE_SGE_NOT_EXTEND_FLAG; + v_sqe->sge[index].wd1.last_flag = HIFC_WQE_SGE_NOT_LAST_FLAG; + + if (index == (v_pkg->entry_count - 1)) { + /* Sets the last WQE end flag 1 */ + v_sqe->sge[index].wd1.last_flag = + HIFC_WQE_SGE_LAST_FLAG; + } + + hifc_cpu_to_big32(&v_sqe->sge[index], + sizeof(struct hifcoe_variable_sge_s)); + } + + /* Adjust the length of the BDSL field in the CTRL domain. */ + HIFC_ADJUST_DATA(v_sqe->ctrl_sl.ch.wd0.bdsl, + HIFC_BYTES_TO_QW_NUM((v_pkg->entry_count * + sizeof(struct hifcoe_variable_sge_s)))); + + /* The entry_count= 0 needs to be specially processed and does not + * need to be mounted. 
As long as len is set to zero, Last-bit is set + * to one, and E-bit is set to 0. + */ + if (v_pkg->entry_count == 0) { + v_sqe->sge[0].buf_addr_hi = 0; + v_sqe->sge[0].buf_addr_lo = 0; + v_sqe->sge[0].wd0.buf_len = 0; + + /* rdma flag. This field is not used in fc. Set it to 0. */ + v_sqe->sge[0].wd0.r_flag = 0; + + /* parity bit */ + v_sqe->sge[0].wd1.buf_addr_gpa = + (v_sqe->sge[0].buf_addr_lo >> 16); + v_sqe->sge[0].wd1.xid = (parent_sq->context_id & 0x3fff); + + /* The local sgl does not use the cascading SGE. Therefore, + * the value of this field is always 0. + */ + v_sqe->sge[0].wd1.extension_flag = HIFC_WQE_SGE_NOT_EXTEND_FLAG; + v_sqe->sge[0].wd1.last_flag = HIFC_WQE_SGE_LAST_FLAG; + + hifc_cpu_to_big32(&v_sqe->sge[0], + sizeof(struct hifcoe_variable_sge_s)); + + /* Adjust the length of the BDSL field in the CTRL domain. */ + HIFC_ADJUST_DATA( + v_sqe->ctrl_sl.ch.wd0.bdsl, + HIFC_BYTES_TO_QW_NUM( + sizeof(struct hifcoe_variable_sge_s))); + } + + return RETURN_OK; +} + +static unsigned int hifc_build_external_sgl(struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + struct hifcoe_sqe_s *v_sqe, + int v_direct, + unsigned int v_bd_sge_num) +{ + unsigned int ret = UNF_RETURN_ERROR; + char *buf = NULL; + struct unf_esgl_page_s *esgl_page = NULL; + unsigned long phys = 0; + unsigned int buf_len = 0; + unsigned int index = 0; + unsigned int left_sge_num = 0; + unsigned int local_sge_num = 0; + struct hifc_parent_sq_info_s *parent_sq = NULL; + + parent_sq = hifc_find_parent_sq_by_pkg((void *)v_hba, v_pkg); + if (unlikely(!parent_sq)) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "Port(0x%x) send packet oxid_rxid(0x%x) fail, as sid_did(0x%x_0x%x)'s parent sq is null.", + v_hba->port_cfg.port_id, + v_pkg->frame_head.oxid_rxid, + v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return UNF_RETURN_ERROR; + } + + /* Ensure that the value of v_bd_sge_num is greater than or equal to one + */ + local_sge_num = v_bd_sge_num - 1; + + for (index = 0; index < local_sge_num; index++) { + UNF_CM_GET_SGL_ENTRY(ret, (void *)v_pkg, &buf, &buf_len); + if (unlikely(ret != RETURN_OK)) + return UNF_RETURN_ERROR; + phys = (unsigned long)buf; + + v_sqe->sge[index].buf_addr_hi = UNF_DMA_HI32(phys); + v_sqe->sge[index].buf_addr_lo = UNF_DMA_LO32(phys); + v_sqe->sge[index].wd0.buf_len = buf_len; + + /* RDMA flag, which is not used by FC. */ + v_sqe->sge[index].wd0.r_flag = 0; + v_sqe->sge[index].wd1.extension_flag = + HIFC_WQE_SGE_NOT_EXTEND_FLAG; + v_sqe->sge[index].wd1.last_flag = HIFC_WQE_SGE_NOT_LAST_FLAG; + + /* parity bit */ + v_sqe->sge[index].wd1.buf_addr_gpa = + (v_sqe->sge[index].buf_addr_lo >> 16); + v_sqe->sge[index].wd1.xid = (parent_sq->context_id & 0x3fff); + + hifc_cpu_to_big32(&v_sqe->sge[index], + sizeof(struct hifcoe_variable_sge_s)); + } + + /* Allocating the first cascading page */ + UNF_GET_FREE_ESGL_PAGE(esgl_page, v_hba->lport, v_pkg); + if (unlikely(!esgl_page)) + return UNF_RETURN_ERROR; + + phys = esgl_page->esgl_phyaddr; + /* Adjust the length of the BDSL field in the CTRL domain. 
*/ + HIFC_ADJUST_DATA(v_sqe->ctrl_sl.ch.wd0.bdsl, + HIFC_BYTES_TO_QW_NUM((v_bd_sge_num * + sizeof(struct hifcoe_variable_sge_s)))); + + /* Configuring the Address of the Cascading Page */ + v_sqe->sge[index].buf_addr_hi = (u32)UNF_DMA_HI32(phys); + v_sqe->sge[index].buf_addr_lo = (u32)UNF_DMA_LO32(phys); + + /* Configuring Control Information About the Cascading Page */ + v_sqe->sge[index].wd0.buf_len = 0; + v_sqe->sge[index].wd0.r_flag = 0; + v_sqe->sge[index].wd1.extension_flag = HIFC_WQE_SGE_EXTEND_FLAG; + v_sqe->sge[index].wd1.last_flag = HIFC_WQE_SGE_NOT_LAST_FLAG; + + /* parity bit */ + v_sqe->sge[index].wd1.buf_addr_gpa = + (v_sqe->sge[index].buf_addr_lo >> 16); + v_sqe->sge[index].wd1.xid = (parent_sq->context_id & 0x3fff); + + hifc_cpu_to_big32(&v_sqe->sge[index], + sizeof(struct hifcoe_variable_sge_s)); + + /* Calculate the number of remaining sge. */ + left_sge_num = v_pkg->entry_count - local_sge_num; + + /* Fill in the sge information on the cascading page. */ + ret = hifc_fill_external_sgl_page(v_hba, v_pkg, esgl_page, + left_sge_num, v_direct, + parent_sq->context_id, + UNF_FALSE); + if (ret != RETURN_OK) + return UNF_RETURN_ERROR; + + return RETURN_OK; +} + +unsigned int hifc_build_sql_by_local_sge_num(struct unf_frame_pkg_s *v_pkg, + struct hifc_hba_s *v_hba, + struct hifcoe_sqe_s *v_sqe, + int v_direct, + unsigned int bd_sge_num) +{ + unsigned int ret = RETURN_OK; + + if (v_pkg->entry_count <= bd_sge_num) { + ret = hifc_build_local_sgl(v_hba, v_pkg, v_sqe, v_direct); + } else { + ret = hifc_build_external_sgl(v_hba, v_pkg, v_sqe, + v_direct, bd_sge_num); + } + return ret; +} + +unsigned int hifc_conf_dual_sgl_info(struct unf_frame_pkg_s *v_pkg, + struct hifc_hba_s *v_hba, + struct hifcoe_sqe_s *v_sqe, + int v_direct, + unsigned int bd_sge_num, + int double_sgl) +{ + unsigned int ret = RETURN_OK; + + if (double_sgl == UNF_TRUE) { + /* Adjust the length of the DIF_SL field in the CTRL domain */ + HIFC_ADJUST_DATA( + v_sqe->ctrl_sl.ch.wd0.dif_sl, + HIFC_BYTES_TO_QW_NUM( + sizeof(struct hifcoe_variable_sge_s))); + + if (v_pkg->dif_control.dif_sge_count <= + HIFC_WQE_SGE_DIF_ENTRY_NUM) { + ret = hifc_build_local_dif_sgl(v_hba, v_pkg, v_sqe, + v_direct, bd_sge_num); + } else { + ret = hifc_build_external_dif_sgl(v_hba, v_pkg, v_sqe, + v_direct, bd_sge_num); + } + } + + return ret; +} + +static unsigned int hifc_build_sgl(struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + struct hifcoe_sqe_s *v_sqe, + int v_direct, + unsigned int dif_flag) +{ + unsigned int ret = RETURN_OK; + unsigned int bd_sge_num = HIFC_WQE_SGE_ENTRY_NUM; + int double_sgl = UNF_FALSE; + + if ((dif_flag != 0) && + (v_pkg->dif_control.flags & UNF_DIF_DOUBLE_SGL)) { + bd_sge_num = + HIFC_WQE_SGE_ENTRY_NUM - HIFC_WQE_SGE_DIF_ENTRY_NUM; + double_sgl = UNF_TRUE; + } + + /* Only one wqe local sge can be loaded. 
If more than one wqe local sge + * is used, use the esgl + */ + ret = hifc_build_sql_by_local_sge_num(v_pkg, v_hba, v_sqe, + v_direct, bd_sge_num); + + if (unlikely(ret != RETURN_OK)) + return ret; + + /* Configuring Dual SGL Information for DIF */ + ret = hifc_conf_dual_sgl_info(v_pkg, v_hba, v_sqe, v_direct, + bd_sge_num, double_sgl); + + return ret; +} + +static void hifc_adjust_dix(struct unf_frame_pkg_s *v_pkg, + struct hifcoe_fc_dif_info_s *v_dif_info_l1, + unsigned char v_task_type) +{ + unsigned char task_type = v_task_type; + struct hifcoe_fc_dif_info_s *dif_info_l1 = NULL; + + dif_info_l1 = v_dif_info_l1; + + if (dix_flag == 1) { + if ((task_type == HIFC_SQE_FCP_IWRITE) || + (task_type == HIFC_SQE_FCP_TRD)) { + if ((UNF_DIF_ACTION_MASK & + (v_pkg->dif_control.protect_opcode)) == + UNF_DIF_ACTION_VERIFY_AND_FORWARD) { + dif_info_l1->wd0.grd_ctrl |= + HIFC_DIF_GARD_REF_APP_CTRL_REPLACE; + dif_info_l1->wd0.grd_agm_ctrl = + HIFC_DIF_GUARD_VERIFY_IP_CHECKSUM_REPLACE_CRC16; + } + + if ((UNF_DIF_ACTION_MASK & + (v_pkg->dif_control.protect_opcode)) == + UNF_DIF_ACTION_VERIFY_AND_DELETE) { + dif_info_l1->wd0.grd_agm_ctrl = + HIFC_DIF_GUARD_VERIFY_IP_CHECKSUM_REPLACE_CRC16; + } + } + + if ((task_type == HIFC_SQE_FCP_IREAD) || + (task_type == HIFC_SQE_FCP_TWR)) { + if ((UNF_DIF_ACTION_MASK & + (v_pkg->dif_control.protect_opcode)) == + UNF_DIF_ACTION_VERIFY_AND_FORWARD) { + dif_info_l1->wd0.grd_ctrl |= + HIFC_DIF_GARD_REF_APP_CTRL_REPLACE; + dif_info_l1->wd0.grd_agm_ctrl = + HIFC_DIF_GUARD_VERIFY_CRC16_REPLACE_IP_CHECKSUM; + } + + if ((UNF_DIF_ACTION_MASK & + (v_pkg->dif_control.protect_opcode)) == + UNF_DIF_ACTION_INSERT) { + dif_info_l1->wd0.grd_agm_ctrl = + HIFC_DIF_GUARD_VERIFY_CRC16_REPLACE_IP_CHECKSUM; + } + } + } + + if (grd_agm_ctrl != 0) + dif_info_l1->wd0.grd_agm_ctrl = grd_agm_ctrl; + + if (grd_ctrl != 0) + dif_info_l1->wd0.grd_ctrl = grd_ctrl; +} + +void hifc_get_dma_direction_by_fcp_cmnd(const struct unf_fcp_cmnd_s *v_fcp_cmnd, + int *v_pi_dma_direction, + unsigned char *v_task_type) +{ + if (UNF_FCP_WR_DATA & v_fcp_cmnd->control) { + *v_task_type = HIFC_SQE_FCP_IWRITE; + *v_pi_dma_direction = DMA_TO_DEVICE; + } else if (UNF_GET_TASK_MGMT_FLAGS(v_fcp_cmnd->control) != 0) { + *v_task_type = HIFC_SQE_FCP_ITMF; + *v_pi_dma_direction = DMA_FROM_DEVICE; + } else { + *v_task_type = HIFC_SQE_FCP_IREAD; + *v_pi_dma_direction = DMA_FROM_DEVICE; + } +} + +static void hifc_adjust_icmnd_burst_len(struct unf_frame_pkg_s *v_pkg, + struct hifcoe_sqe_ts_s *v_sqe_ts, + int direction) +{ + struct hifcoe_sqe_icmnd_s *icmnd = &v_sqe_ts->cont.icmnd; + + icmnd->info.dif_info.wd0.difx_len = 0; +} + +static inline unsigned int hifc_build_cmnd_wqe(struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + struct hifcoe_sqe_s *v_sge) +{ + unsigned int ret = RETURN_OK; + int direction = 0; + unsigned char task_type = 0; + struct unf_fcp_cmnd_s *fcp_cmnd = NULL; + struct hifcoe_sqe_s *sqe = v_sge; + unsigned int dif_flag = 0; + + fcp_cmnd = v_pkg->fcp_cmnd; + if (unlikely(!fcp_cmnd)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Package's FCP commond pointer is NULL."); + + return UNF_RETURN_ERROR; + } + + hifc_get_dma_direction_by_fcp_cmnd(fcp_cmnd, &direction, &task_type); + + hifc_build_icmnd_wqe_ts_header(v_pkg, sqe, task_type, + v_hba->exit_base, v_hba->port_index); + + hifc_build_trd_twr_wqe_ctrls(v_pkg, sqe); + + hifc_build_icmnd_wqe_ts(v_hba, v_pkg, &sqe->ts_sl); + + if (task_type != HIFC_SQE_FCP_ITMF) { + if (v_pkg->dif_control.protect_opcode == UNF_DIF_ACTION_NONE) { + 
dif_flag = 0; + hifc_build_no_dif_control( + v_pkg, + &sqe->ts_sl.cont.icmnd.info.dif_info); + } else { + dif_flag = 1; + hifc_build_dif_control( + v_hba, v_pkg, + &sqe->ts_sl.cont.icmnd.info.dif_info); + hifc_adjust_dix( + v_pkg, &sqe->ts_sl.cont.icmnd.info.dif_info, + task_type); + hifc_adjust_icmnd_burst_len(v_pkg, &sqe->ts_sl, + direction); + } + } + + ret = hifc_build_sgl(v_hba, v_pkg, sqe, direction, dif_flag); + + return ret; +} + +unsigned int hifc_send_scsi_cmnd(void *v_hba, struct unf_frame_pkg_s *v_pkg) +{ + struct hifc_hba_s *hba = NULL; + struct hifc_parent_sq_info_s *parent_sq = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct hifcoe_sqe_s sqe; + + /* input param check */ + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_pkg, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + (UNF_GET_OXID(v_pkg) != INVALID_VALUE16), return UNF_RETURN_ERROR); + + HIFC_CHECK_PKG_ALLOCTIME(v_pkg); + memset(&sqe, 0, sizeof(struct hifcoe_sqe_s)); + hba = v_hba; + + /* 1. find parent sq for scsi_cmnd(pkg) */ + parent_sq = hifc_find_parent_sq_by_pkg(hba, v_pkg); + if (unlikely(!parent_sq)) + /* Do not need to print info */ + return UNF_RETURN_ERROR; + + v_pkg->qos_level += hba->vpid_start; + + /* 2. build cmnd wqe (to sqe) for scsi_cmnd(pkg) */ + ret = hifc_build_cmnd_wqe(hba, v_pkg, &sqe); + if (unlikely(ret != RETURN_OK)) { + HIFC_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_ERR, + "[fail]Port(0x%x) Build WQE failed, SID(0x%x) DID(0x%x) OXID(0x%x) pkg type(0x%x) hot pool tag(0x%x).", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did, UNF_GET_OXID(v_pkg), + v_pkg->type, UNF_GET_XCHG_TAG(v_pkg)); + + return ret; + } + + /* 3. 
En-Queue Parent SQ for scsi_cmnd(pkg) sqe */ + ret = hifc_parent_sq_enqueue(parent_sq, &sqe); + + return ret; +} + +static void hifc_ini_status_default_handler(struct hifcoe_scqe_iresp_s *v_iresp, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned char control = 0; + unsigned short com_err_code = 0; + + control = v_iresp->wd2.fcp_flag & HIFC_CTRL_MASK; + + if (v_iresp->fcp_resid != 0) { + com_err_code = UNF_IO_FAILED; + v_pkg->residus_len = v_iresp->fcp_resid; + } else { + com_err_code = UNF_IO_SUCCESS; + v_pkg->residus_len = 0; + } + + v_pkg->status = hifc_fill_pkg_status(com_err_code, control, + v_iresp->wd2.scsi_status); + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_INFO, + "[info]Fill package with status: 0x%x, residus len: 0x%x", + v_pkg->status, v_pkg->residus_len); +} + +void hifc_check_fcp_rsp_iu(struct hifcoe_scqe_iresp_s *v_iresp, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned char scsi_status = 0; + unsigned char control = 0; + + control = (unsigned char)v_iresp->wd2.fcp_flag; + scsi_status = (unsigned char)v_iresp->wd2.scsi_status; + + /* FcpRspIU with Little End from IOB/WQE, to COM's pstPkg also */ + if (control & FCP_RESID_UNDER_MASK) { + /* under flow: usually occurs in inquiry */ + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_INFO, + "[info]I_STS IOB posts under flow with residus len: %u, FCP residue: %u.", + v_pkg->residus_len, v_iresp->fcp_resid); + + if (v_pkg->residus_len != v_iresp->fcp_resid) { + v_pkg->status = hifc_fill_pkg_status(UNF_IO_FAILED, + control, + scsi_status); + } else { + v_pkg->status = + hifc_fill_pkg_status(UNF_IO_UNDER_FLOW, + control, scsi_status); + } + } + + if (control & FCP_RESID_OVER_MASK) { + /* over flow: error happened */ + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]I_STS IOB posts over flow with residus len: %u, FCP residue: %u.", + v_pkg->residus_len, v_iresp->fcp_resid); + + if (v_pkg->residus_len != v_iresp->fcp_resid) { + v_pkg->status = hifc_fill_pkg_status(UNF_IO_FAILED, + control, + scsi_status); + } else { + v_pkg->status = hifc_fill_pkg_status(UNF_IO_OVER_FLOW, + control, + scsi_status); + } + } + + v_pkg->unf_rsp_pload_bl.length = 0; + v_pkg->unf_sense_pload_bl.length = 0; + + if (control & FCP_RSP_LEN_VALID_MASK) { + /* dma by chip */ + v_pkg->unf_rsp_pload_bl.buffer_ptr = NULL; + + v_pkg->unf_rsp_pload_bl.length = v_iresp->fcp_rsp_len; + v_pkg->byte_orders |= UNF_BIT_3; + } + + if (control & FCP_SNS_LEN_VALID_MASK) { + /* dma by chip */ + v_pkg->unf_sense_pload_bl.buffer_ptr = NULL; + + v_pkg->unf_sense_pload_bl.length = v_iresp->fcp_sns_len; + v_pkg->byte_orders |= UNF_BIT_4; + } +} + +unsigned short hifc_get_com_err_code(struct unf_frame_pkg_s *v_pkg) +{ + unsigned short com_err_code = UNF_IO_FAILED; + + if (v_pkg->status_sub_code == DRV_DIF_CRC_ERR) + com_err_code = UNF_IO_DIF_ERROR; + else if (v_pkg->status_sub_code == DRV_DIF_LBA_ERR) + com_err_code = UNF_IO_DIF_REF_ERROR; + else + com_err_code = UNF_IO_DIF_GEN_ERROR; + return com_err_code; +} + +void hifc_process_ini_fail_io(struct hifc_hba_s *v_hba, + struct hifcoe_scqe_iresp_s *v_iresp, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned short com_err_code = UNF_IO_FAILED; + unsigned char dif_info = 0; + + /* 1. 
error stats process */ + if (HIFC_GET_SCQE_STATUS((union hifcoe_scqe_u *)(void *)v_iresp) != 0) { + switch (HIFC_GET_SCQE_STATUS( + (union hifcoe_scqe_u *)(void *)v_iresp)) { + /* DIF error process */ + case HIFC_COMPLETION_STATUS_DIF_ERROR: + dif_info = (unsigned char)v_iresp->wd1.dif_info; + v_pkg->status_sub_code = + (dif_info & HIFC_DIF_ERROR_CODE_CRC) ? + DRV_DIF_CRC_ERR : ((dif_info & + HIFC_DIF_ERROR_CODE_REF) ? DRV_DIF_LBA_ERR : + ((dif_info & HIFC_DIF_ERROR_CODE_APP) ? + DRV_DIF_APP_ERR : 0)); + + com_err_code = hifc_get_com_err_code(v_pkg); + + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_MAJOR, + "[err]Port(0x%x) INI io oxid(0x%x), rxid(0x%x) status with dif err(0x%x)", + v_hba->port_cfg.port_id, v_iresp->wd0.ox_id, + v_iresp->wd0.rx_id, dif_info); + + hifc_dif_err_count(v_hba, dif_info); + break; + + /* I/O not complete: 1.session reset; 2.clear buffer */ + case FCOE_CQE_BUFFER_CLEAR_IO_COMPLETED: + case FCOE_CQE_SESSION_RST_CLEAR_IO_COMPLETED: + case FCOE_CQE_SESSION_ONLY_CLEAR_IO_COMPLETED: + case FCOE_CQE_WQE_FLUSH_IO_COMPLETED: + com_err_code = UNF_IO_CLEAN_UP; + + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_MAJOR, + "[warn]Port(0x%x) INI IO not complete, OX_ID(0x%x) RX_ID(0x%x) status(0x%x)", + v_hba->port_cfg.port_id, v_iresp->wd0.ox_id, + v_iresp->wd0.rx_id, com_err_code); + break; + + /* any other: I/O failed --->>> DID error */ + default: + com_err_code = UNF_IO_FAILED; + break; + } + + /* fill pkg status & return directly */ + v_pkg->status = + hifc_fill_pkg_status(com_err_code, v_iresp->wd2.fcp_flag, + v_iresp->wd2.scsi_status); + return; + } + + /* 2. default stats process */ + hifc_ini_status_default_handler(v_iresp, v_pkg); + + /* 3. FCP RSP IU check */ + hifc_check_fcp_rsp_iu(v_iresp, v_pkg); +} + +unsigned int hifc_scq_recv_iresp(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_wqe) +{ + struct hifcoe_scqe_iresp_s *iresp = NULL; + struct unf_frame_pkg_s pkg; + unsigned int ret = RETURN_OK; + + iresp = (struct hifcoe_scqe_iresp_s *)(void *)v_wqe; + + /* 1. Constraints: I_STS remain cnt must be zero */ + if (unlikely(HIFC_GET_SCQE_REMAIN_CNT(v_wqe) != 0)) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) ini_wqe(OX_ID:0x%x RX_ID:0x%x) remain_cnt(0x%x) abnormal, status(0x%x)", + v_hba->port_cfg.port_id, + iresp->wd0.ox_id, + iresp->wd0.rx_id, + HIFC_GET_SCQE_REMAIN_CNT(v_wqe), + HIFC_GET_SCQE_STATUS(v_wqe)); + + UNF_PRINT_SFS_LIMIT(UNF_MAJOR, v_hba->port_cfg.port_id, v_wqe, + sizeof(union hifcoe_scqe_u)); + + /* return directly */ + return UNF_RETURN_ERROR; + } + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME] = iresp->magic_num; + + /* 2. OX_ID validity check */ + if (likely(((unsigned short)iresp->wd0.ox_id >= v_hba->exit_base) && + ((unsigned short)iresp->wd0.ox_id < + v_hba->exit_base + v_hba->exit_count))) { + pkg.status = UNF_IO_SUCCESS; + pkg.private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = + iresp->wd0.ox_id - v_hba->exit_base; + } else { + /* OX_ID error: return by COM */ + pkg.status = UNF_IO_FAILED; + pkg.private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = INVALID_VALUE16; + + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) ini_cmnd_wqe(OX_ID:0x%x RX_ID:0x%x) ox_id invalid, status(0x%x)", + v_hba->port_cfg.port_id, + iresp->wd0.ox_id, + iresp->wd0.rx_id, + HIFC_GET_SCQE_STATUS(v_wqe)); + + UNF_PRINT_SFS_LIMIT(UNF_MAJOR, v_hba->port_cfg.port_id, + v_wqe, sizeof(union hifcoe_scqe_u)); + } + + /* 3. 
status check */ + if (unlikely(HIFC_GET_SCQE_STATUS(v_wqe) || + (iresp->wd2.scsi_status != 0) || + (iresp->fcp_resid != 0) || + ((iresp->wd2.fcp_flag & HIFC_CTRL_MASK) != 0))) { + HIFC_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_INFO, + "[warn]Port(0x%x) scq_status(0x%x) scsi_status(0x%x) fcp_resid(0x%x) fcp_flag(0x%x)", + v_hba->port_cfg.port_id, HIFC_GET_SCQE_STATUS(v_wqe), + iresp->wd2.scsi_status, iresp->fcp_resid, + iresp->wd2.fcp_flag); + + /* set pkg status & check fcp_rsp IU */ + hifc_process_ini_fail_io(v_hba, iresp, &pkg); + } + + /* 4. LL_Driver ---to--->>> COM_Driver */ + UNF_LOWLEVEL_SCSI_COMPLETED(ret, v_hba->lport, &pkg); + + return ret; +} diff --git a/drivers/scsi/huawei/hifc/hifc_io.h b/drivers/scsi/huawei/hifc/hifc_io.h new file mode 100644 index 000000000000..e7e7e1bd38a4 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_io.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef __HIFC_IO_H__ +#define __HIFC_IO_H__ + +enum dif_mode_e { + DIF_MODE_NONE = 0x0, + DIF_MODE_INSERT = 0x1, + DIF_MODE_REMOVE = 0x2, + DIF_MODE_FORWARD_OR_REPLACE = 0x3 +}; + +enum ref_tag_mode_e { + BOTH_NONE = 0x0, + RECEIVE_INCREASE = 0x1, + REPLACE_INCREASE = 0x2, + BOTH_INCREASE = 0x3 +}; + +#define HIFC_DIF_DISABLE 0 +#define HIFC_DIF_ENABLE 1 +#define HIFC_DIF_SECTOR_512B_MODE 0 +#define HIFC_DIF_SECTOR_4KB_MODE 1 +#define HIFC_DIF_GUARD_VERIFY_ALGORITHM_CTL_T10_CRC16 0x0 +#define HIFC_DIF_GUARD_VERIFY_CRC16_REPLACE_IP_CHECKSUM 0x1 +#define HIFC_DIF_GUARD_VERIFY_IP_CHECKSUM_REPLACE_CRC16 0x2 +#define HIFC_DIF_GUARD_VERIFY_ALGORITHM_CTL_IP_CHECKSUM 0x3 +#define HIFC_DIF_CRC_CS_INITIAL_CONFIG_BY_REGISTER 0 +#define HIFC_DIF_CRC_CS_INITIAL_CONFIG_BY_BIT0_1 0x4 + +#define HIFC_DIF_GARD_REF_APP_CTRL_VERIFY 0x4 +#define HIFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY 0x0 +#define HIFC_DIF_GARD_REF_APP_CTRL_INSERT 0x0 +#define HIFC_DIF_GARD_REF_APP_CTRL_DELETE 0x1 +#define HIFC_DIF_GARD_REF_APP_CTRL_FORWARD 0x2 +#define HIFC_DIF_GARD_REF_APP_CTRL_REPLACE 0x3 + +#define HIFC_DIF_ERROR_CODE_MASK 0xe +#define HIFC_DIF_ERROR_CODE_CRC 0x2 +#define HIFC_DIF_ERROR_CODE_REF 0x4 +#define HIFC_DIF_ERROR_CODE_APP 0x8 + +#define HIFC_DIF_SEND_DIFERR_PAYLOAD 0 +#define HIFC_DIF_SEND_DIFERR_CRC 1 +#define HIFC_DIF_SEND_DIFERR_APP 2 +#define HIFC_DIF_SEND_DIFERR_REF 3 +#define HIFC_DIF_RECV_DIFERR_ALL 4 +#define HIFC_DIF_RECV_DIFERR_CRC 5 +#define HIFC_DIF_RECV_DIFERR_APP 6 +#define HIFC_DIF_RECV_DIFERR_REF 7 + +#define HIFC_SECT_SIZE_512 512 +#define HIFC_SECT_SIZE_4096 4096 +#define HIFC_SECT_SIZE_512_8 520 +#define HIFC_SECT_SIZE_4096_8 4104 +#define HIFC_CTRL_MASK 0x1f + +unsigned int hifc_send_scsi_cmnd(void *v_hba, struct unf_frame_pkg_s *v_pkg); +unsigned int hifc_scq_recv_iresp(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_wqe); + +#endif /* __HIFC_IO_H__ */ diff --git a/drivers/scsi/huawei/hifc/hifc_wqe.c b/drivers/scsi/huawei/hifc/hifc_wqe.c new file mode 100644 index 000000000000..d1339d4bf917 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_wqe.c @@ -0,0 +1,667 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Fabric Channel Linux driver + * Copyright(c) 2018 Huawei Technologies Co., Ltd + * + */ + +#include "hifc_module.h" +#include "hifc_service.h" + +void hifc_build_common_wqe_ctrls(struct hifcoe_wqe_ctrl_s *v_ctrl_sl, + unsigned char v_task_len) +{ + /* "BDSL" field of CtrlS - defines the size of BDS, + * which varies from 0 to 2040 bytes (8 bits of 8 bytes' chunk) + */ + 
v_ctrl_sl->ch.wd0.bdsl = 0; + + /* + * "DrvSL" field of CtrlS - defines the size of DrvS, which varies from + * 0 to 24 bytes + */ + v_ctrl_sl->ch.wd0.drv_sl = 0; + + /* a. + * b1 - linking WQE, which will be only used in linked page architecture + * instead of ring, it's a special control WQE which does not contain + * any buffer or inline data information, and will only be consumed by + * hardware. The size is aligned to WQEBB/WQE b0 - normal WQE, either + * normal SEG WQE or inline data WQE + */ + v_ctrl_sl->ch.wd0.wf = 0; + + /* + * "CF" field of CtrlS - Completion Format - defines the format of CS. + * a.b0 - Status information is embedded inside of Completion Section + * b.b1 - Completion Section keeps SGL, where Status information + * should be written. (For the definition of SGLs see ?4.1* .) + */ + v_ctrl_sl->ch.wd0.cf = 0; + + /* + * "TSL" field of CtrlS - defines the size of TS, which varies from 0 + * to 248 bytes + */ + v_ctrl_sl->ch.wd0.tsl = v_task_len; + + /* + * Variable length SGE (vSGE). The size of SGE is 16 bytes. The vSGE + * format is of two types, which are defined by "VA " field of CtrlS. + * "VA" stands for Virtual Address: o b0. SGE comprises 64-bits buffer's + * pointer and 31-bits Length, each SGE can only support up to 2G-1B, + * it can guarantee each single SGE length can not exceed 2GB by nature, + * A byte count value of zero means a 0byte data transfer.o b1. + * SGE comprises 64-bits buffer's pointer, 31-bits Length and 30-bits + * Key of the Translation table ,each SGE can only support up to 2G-1B, + * it can guarantee each single SGE length can notexceed 2GB by nature, + * A byte count value of zero means a 0byte data transfer + */ + v_ctrl_sl->ch.wd0.va = 0; + + /* + * "DF" field of CtrlS - Data Format - defines the format of BDS + * a. b0 - BDS carries the list of SGEs (SGL) + * b. b1 - BDS carries the inline data + */ + v_ctrl_sl->ch.wd0.df = 0; + + /* + * "CR" - Completion is Required - marks CQE generation request per WQE + */ + v_ctrl_sl->ch.wd0.cr = 1; + + /* + * "DIFSL" field of CtrlS - defines the size of DIFS, which varies from + * 0 to 56 bytes + */ + v_ctrl_sl->ch.wd0.dif_sl = 0; + + /* + * "CSL" field of CtrlS - defines the size of CS, which varies from 0 to + * 24 bytes + */ + v_ctrl_sl->ch.wd0.csl = 0; + + /* "CtrlSL" - C describes the size of CtrlS in 8 bytes chunks. + *The value Zero is not valid + */ + v_ctrl_sl->ch.wd0.ctrl_sl = 1; + + /* "O" - Owner - marks ownership of WQE */ + v_ctrl_sl->ch.wd0.owner = 0; +} + +void hifc_build_trd_twr_wqe_ctrls(struct unf_frame_pkg_s *v_pkg, + struct hifcoe_sqe_s *v_sqe) +{ + /* "BDSL" field of CtrlS - defines the size of BDS, which varies from + * 0 to 2040 bytes (8 bits of 8 bytes' chunk) + */ + /* TrdWqe carry 2 SGE defaultly, 4DW per SGE, the value is 4 because + * unit is 2DW, in double SGL mode, bdsl is 2 + */ + v_sqe->ctrl_sl.ch.wd0.bdsl = HIFC_T_RD_WR_WQE_CTR_BDSL_SIZE; + + /* + * "DrvSL" field of CtrlS - defines the size of DrvS, which varies from + * 0 to 24 bytes DrvSL config for 0 + */ + v_sqe->ctrl_sl.ch.wd0.drv_sl = 0; + + /* a. b1 - linking WQE, which will be only used in linked page + * architecture instead of ring, it's a special control WQE which does + * not contain any buffer or inline data information, and will only be + * consumed by hardware. The size is aligned to WQEBB/WQE b0 - normal + * WQE, either normal SEG WQE or inline data WQE + */ + /* normal wqe */ + v_sqe->ctrl_sl.ch.wd0.wf = 0; + + /* + * "CF" field of CtrlS - Completion Format - defines the format of CS. 
+ * a.b0 - Status information is embedded inside of Completion Section + * b.b1 - Completion Section keeps SGL, where Status information + * should be written. (For the definition of SGLs see ?4.1.) + */ + /* by SCQE mode, the value is ignored */ + v_sqe->ctrl_sl.ch.wd0.cf = 0; + + /* "TSL" field of CtrlS - defines the size of TS, which varies from 0 to + * 248 bytes + */ + /* TSL is configured by 56 bytes */ + v_sqe->ctrl_sl.ch.wd0.tsl = sizeof(struct hifcoe_sqe_ts_s) / + HIFC_WQE_SECTION_CHUNK_SIZE; + + /* + * Variable length SGE (vSGE). The size of SGE is 16 bytes. The vSGE + * format is of two types, which are defined by "VA" field of CtrlS. + * "VA" stands for Virtual Address: o b0. SGE comprises 64-bits buffer's + * pointer and 31-bits Length, each SGE can only support up to 2G-1B, it + * can guarantee each single SGE length can not exceed 2GB by nature, A + * byte count value of zero means a 0byte data transfer. o b1. SGE + * comprises 64-bits buffer's pointer, 31-bits Length and 30-bits Key of + * the Translation table , each SGE can only support up to 2G-1B, it can + * guarantee each single SGE length can not exceed 2GB by nature, A byte + * count value of zero means a 0byte data transfer + */ + v_sqe->ctrl_sl.ch.wd0.va = 0; + + /* + * "DF" field of CtrlS - Data Format - defines the format of BDS + * a. b0 - BDS carries the list of SGEs (SGL) + * b. b1 - BDS carries the inline data + */ + v_sqe->ctrl_sl.ch.wd0.df = 0; + + /* "CR" - Completion is Required marks CQE generation request per WQE */ + /* by SCQE mode, this value is ignored */ + v_sqe->ctrl_sl.ch.wd0.cr = 1; + + /* + * "DIFSL" field of CtrlS - defines the size of DIFS, which varies from + * 0 to 56 bytes. + */ + v_sqe->ctrl_sl.ch.wd0.dif_sl = 0; + + /* + * "CSL" field of CtrlS - defines the size of CS, which varies from 0 to + * 24 bytes + */ + v_sqe->ctrl_sl.ch.wd0.csl = 0; + + /* "CtrlSL" - C describes the size of CtrlS in 8 bytes chunks. + * The value Zero is not valid. + */ + v_sqe->ctrl_sl.ch.wd0.ctrl_sl = HIFC_T_RD_WR_WQE_CTR_CTRLSL_SIZE; + + /* "O" - Owner - marks ownership of WQE */ + v_sqe->ctrl_sl.ch.wd0.owner = 0; +} + +void hifc_build_service_wqe_ts_common(struct hifcoe_sqe_ts_s *v_sqe_ts, + unsigned int rport_index, + unsigned short local_xid, + unsigned short remote_xid, + unsigned short data_len) +{ + v_sqe_ts->local_xid = local_xid; + + v_sqe_ts->wd0.conn_id = (unsigned short)rport_index; + v_sqe_ts->wd0.remote_xid = remote_xid; + + v_sqe_ts->cont.els_gs_elsrsp_comm.data_len = data_len; +} + +void hifc_build_els_gs_wqe_sge(struct hifcoe_sqe_s *v_sqe, void *v_buf_addr, + unsigned long long v_phy_addr, + unsigned int buf_len, + unsigned int xid, void *v_hba) +{ + unsigned long long els_rsp_phy_addr; + struct hifcoe_variable_sge_s *psge = NULL; + + /* Fill in SGE and convert it to big-endian. */ + psge = &v_sqe->sge[0]; + els_rsp_phy_addr = v_phy_addr; + psge->buf_addr_hi = HIFC_HIGH_32_BITS(els_rsp_phy_addr); + psge->buf_addr_lo = HIFC_LOW_32_BITS(els_rsp_phy_addr); + psge->wd0.buf_len = buf_len; + psge->wd0.r_flag = 0; + psge->wd1.extension_flag = HIFC_WQE_SGE_NOT_EXTEND_FLAG; + psge->wd1.buf_addr_gpa = (psge->buf_addr_lo >> 16); + psge->wd1.xid = (xid & 0x3fff); + psge->wd1.last_flag = HIFC_WQE_SGE_LAST_FLAG; + hifc_cpu_to_big32(psge, sizeof(*psge)); + + /* Converts the payload of an FC frame into a big end. 
*/ + hifc_cpu_to_big32(v_buf_addr, buf_len); +} + +void hifc_build_els_wqe_ts_rsp(struct hifcoe_sqe_s *v_sqe, void *v_sq_info, + void *v_frame_pld, unsigned short type, + unsigned short cmnd, unsigned int v_scqn) +{ + struct unf_pril_payload_s *pri_acc_pld = NULL; + struct hifcoe_sqe_els_rsp_s *els_rsp = NULL; + struct hifcoe_sqe_ts_s *sqe_ts = NULL; + struct hifc_parent_sq_info_s *sq_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(0x5015, UNF_TRUE, v_sqe, return); + UNF_CHECK_VALID(0x5015, UNF_TRUE, v_frame_pld, return); + UNF_CHECK_VALID(0x5015, UNF_TRUE, v_sq_info, return); + + sqe_ts = &v_sqe->ts_sl; + els_rsp = &sqe_ts->cont.els_rsp; + sqe_ts->task_type = HIFC_SQE_ELS_RSP; + + /* The default chip does not need to update parameters. */ + els_rsp->wd1.para_update = 0x0; + + sq_info = (struct hifc_parent_sq_info_s *)v_sq_info; + hba = (struct hifc_hba_s *)sq_info->phba; + /* When the PLOGI request is sent, the microcode needs to be instructed + * to clear the I/O related to the link to avoid data inconsistency + * caused by the disorder of the IO. + */ + if (((cmnd == ELS_LOGO) || (cmnd == ELS_PLOGI)) && hba) { + els_rsp->wd1.clr_io = 1; + els_rsp->wd6.reset_exch_start = hba->exit_base; + els_rsp->wd6.reset_exch_end = hba->exit_base + + (hba->exit_count - 1); + els_rsp->wd7.scqn = v_scqn; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "Port(0x%x) send cmd(0x%x) to RPort(0x%x),rport index(0x%x), notify clean io start 0x%x, end 0x%x, scqn 0x%x.", + sq_info->local_port_id, + cmnd, + sq_info->remote_port_id, + sq_info->rport_index, + els_rsp->wd6.reset_exch_start, + els_rsp->wd6.reset_exch_end, + v_scqn); + + return; + } + + if (type == ELS_RJT) + return; + + /* + * Enter WQE in the PrliAcc negotiation parameter, and fill in the + * Update flag in WQE. + */ + if (cmnd == ELS_PRLI) { + /* The chip updates the PLOGI ACC negotiation parameters. */ + els_rsp->wd2.seq_cnt = sq_info->plogi_coparams.seq_cnt; + els_rsp->wd2.e_d_tov = sq_info->plogi_coparams.ed_tov; + els_rsp->wd2.tx_mfs = sq_info->plogi_coparams.tx_mfs; + els_rsp->e_d_tov_timer_val = + sq_info->plogi_coparams.ed_tov_timer_val; + + /* The chip updates the PRLI ACC parameter. 
*/ + pri_acc_pld = (struct unf_pril_payload_s *)v_frame_pld; + els_rsp->wd4.xfer_dis = HIFC_GET_PRLI_PARAM_WXFER( + pri_acc_pld->parms); + els_rsp->wd4.conf = HIFC_GET_PRLI_PARAM_CONF( + pri_acc_pld->parms); + els_rsp->wd4.rec = HIFC_GET_PRLI_PARAM_REC(pri_acc_pld->parms); + + els_rsp->wd1.para_update = 0x03; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "Port(0x%x) save rport index(0x%x) login parms,seqcnt:0x%x,e_d_tov:0x%x,txmfs:0x%x,e_d_tovtimerval:0x%x,xfer_dis:0x%x, conf:0x%x,rec:0x%x.", + sq_info->local_port_id, + sq_info->rport_index, els_rsp->wd2.seq_cnt, + els_rsp->wd2.e_d_tov, els_rsp->wd2.tx_mfs, + els_rsp->e_d_tov_timer_val, els_rsp->wd4.xfer_dis, + els_rsp->wd4.conf, els_rsp->wd4.rec); + } +} + +void hifc_build_els_wqe_ts_req(struct hifcoe_sqe_s *v_sqe, void *v_sq_info, + unsigned short cmnd, unsigned int v_scqn, + void *v_frame_pld) +{ + struct hifcoe_sqe_ts_s *v_sqe_ts = NULL; + struct hifcoe_sqe_t_els_gs_s *els_req = NULL; + struct hifc_parent_sq_info_s *sq_info = NULL; + struct hifc_hba_s *hba = NULL; + struct unf_rec_pld_s *rec_pld = NULL; + + v_sqe_ts = &v_sqe->ts_sl; + v_sqe_ts->task_type = HIFC_SQE_ELS_CMND; + els_req = &v_sqe_ts->cont.t_els_gs; + + sq_info = (struct hifc_parent_sq_info_s *)v_sq_info; + hba = (struct hifc_hba_s *)sq_info->phba; + + /* + * When the PLOGI request is sent, the microcode needs to be instructed + * to clear the I/O related to the link to avoid data inconsistency + * caused by the disorder of the IO. + */ + if (((cmnd == ELS_LOGO) || (cmnd == ELS_PLOGI)) && hba) { + els_req->wd4.clr_io = 1; + els_req->wd6.reset_exch_start = hba->exit_base; + els_req->wd6.reset_exch_end = hba->exit_base + + (hba->exit_count - 1); + els_req->wd7.scqn = v_scqn; + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "Port(0x%x) Rport(0x%x) SID(0x%x) send %s to DID(0x%x), notify clean io start 0x%x, end 0x%x, scqn 0x%x.", + hba->port_cfg.port_id, sq_info->rport_index, + sq_info->local_port_id, + (cmnd == ELS_PLOGI) ? "PLOGI" : "LOGO", + sq_info->remote_port_id, + els_req->wd6.reset_exch_start, + els_req->wd6.reset_exch_end, + v_scqn); + + return; + } + + /* The chip updates the PLOGI ACC negotiation parameters. */ + if (cmnd == ELS_PRLI) { + els_req->wd5.seq_cnt = sq_info->plogi_coparams.seq_cnt; + els_req->wd5.e_d_tov = sq_info->plogi_coparams.ed_tov; + els_req->wd5.tx_mfs = sq_info->plogi_coparams.tx_mfs; + els_req->e_d_tov_timer_val = + sq_info->plogi_coparams.ed_tov_timer_val; + + els_req->wd4.rec_support = hba->port_cfg.tape_support ? 
1 : 0; + els_req->wd4.para_update = 0x01; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "Port(0x%x) save rport index(0x%x) login parms,seqcnt:0x%x, e_d_tov:0x%x,txmfs:0x%x,e_d_tovtimerval:0x%x.", + sq_info->local_port_id, sq_info->rport_index, + els_req->wd5.seq_cnt, els_req->wd5.e_d_tov, + els_req->wd5.tx_mfs, + els_req->e_d_tov_timer_val); + } + + if (cmnd == ELS_ECHO) + els_req->echo_flag = UNF_TRUE; + if (cmnd == ELS_REC) { + rec_pld = (struct unf_rec_pld_s *)v_frame_pld; + els_req->wd4.rec_flag = 1; + rec_pld->ox_id += hba->exit_base; + els_req->wd4.orign_oxid = rec_pld->ox_id; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "Port(0x%x) Rport(0x%x) SID(0x%x) send Rec to DID(0x%x), origin_oxid 0x%x", + hba->port_cfg.port_id, sq_info->rport_index, + sq_info->local_port_id, + sq_info->remote_port_id, + els_req->wd4.orign_oxid); + } +} + +void hifc_build_els_wqe_ts_magic_num(struct hifcoe_sqe_s *v_sqe, + unsigned short els_cmnd_type, + unsigned int v_magic_num) +{ + struct hifcoe_sqe_t_els_gs_s *els_req; + struct hifcoe_sqe_els_rsp_s *els_rsp; + + if (els_cmnd_type == ELS_ACC || els_cmnd_type == ELS_RJT) { + els_rsp = &v_sqe->ts_sl.cont.els_rsp; + els_rsp->magic_num = v_magic_num; + } else { + els_req = &v_sqe->ts_sl.cont.t_els_gs; + els_req->magic_num = v_magic_num; + } +} + +void hifc_build_gs_wqe_ts_req(struct hifcoe_sqe_s *v_sqe, + unsigned int magic_num) +{ + struct hifcoe_sqe_ts_s *v_sqe_ts = NULL; + struct hifcoe_sqe_t_els_gs_s *gs_req = NULL; + + v_sqe_ts = &v_sqe->ts_sl; + v_sqe_ts->task_type = HIFC_SQE_GS_CMND; + + gs_req = &v_sqe_ts->cont.t_els_gs; + gs_req->magic_num = magic_num; +} + +void hifc_build_bls_wqe_ts_req(struct hifcoe_sqe_s *v_sqe, + unsigned int abts_param, + unsigned int magic_num) +{ + struct hifcoe_sqe_abts_s *abts_ts; + + v_sqe->ts_sl.task_type = HIFC_SQE_BLS_CMND; + abts_ts = &v_sqe->ts_sl.cont.abts; + abts_ts->fh_parm_abts = abts_param; + abts_ts->magic_num = magic_num; +} + +void hifc_build_service_wqe_root_ts(void *v_hba, + struct hifc_root_sqe_s *v_rt_sqe, + unsigned int rx_id, unsigned int rport_id, + unsigned int scq_num) +{ + unsigned char data_cos = 0; + unsigned int port_id = 0; + unsigned int service_type = 0; + struct hifc_hba_s *hba = NULL; + struct hifc_parent_queue_info_s *parent_queue_info = NULL; + + hba = (struct hifc_hba_s *)v_hba; + + port_id = HIFC_GET_HBA_PORT_ID(hba); + service_type = HIFC_GET_SERVICE_TYPE(hba); + + if (rport_id >= UNF_HIFC_MAXRPORT_NUM) { + data_cos = HIFC_GET_PACKET_COS(service_type); + } else { + parent_queue_info = + &hba->parent_queue_mgr->parent_queues[rport_id]; + data_cos = parent_queue_info->queue_data_cos; + } + + v_rt_sqe->task_section.fc_dw0.exch_id = rx_id; + v_rt_sqe->task_section.fc_dw0.host_id = 0; + v_rt_sqe->task_section.fc_dw0.port_id = port_id; + v_rt_sqe->task_section.fc_dw0.off_load = HIFC_NO_OFFLOAD; + v_rt_sqe->task_section.fc_dw3.rport_index = HIFC_LSW(rport_id); + v_rt_sqe->task_section.fc_dw3.scq_num = HIFC_LSW(scq_num); + v_rt_sqe->task_section.fc_dw4.service_type = UNF_GET_SHIFTMASK( + service_type, 0, 0x1f); + v_rt_sqe->task_section.fc_dw4.pkt_type = HIFC_GET_PACKET_TYPE( + service_type); + v_rt_sqe->task_section.fc_dw4.pkt_cos = data_cos; +} + +void hifc_build_service_wqe_root_sge(struct hifc_root_sqe_s *v_rt_sqe, + void *v_buf_addr, + unsigned long long v_phy_addr, + unsigned int buf_len, + void *v_hba) +{ + unsigned long long frame_phy_addr; + + /* Enter the SGE and convert it to the big-endian mode. 
*/ + frame_phy_addr = v_phy_addr; + v_rt_sqe->sge.buf_addr_hi = HIFC_HIGH_32_BITS(frame_phy_addr); + v_rt_sqe->sge.buf_addr_lo = HIFC_LOW_32_BITS(frame_phy_addr); + v_rt_sqe->sge.wd0.buf_len = buf_len; + v_rt_sqe->sge.wd0.ext_flag = 0; + v_rt_sqe->sge.wd1.rsvd = 0; + hifc_cpu_to_big32(&v_rt_sqe->sge, sizeof(v_rt_sqe->sge)); + + /* Converting FC Frames into big Ends */ + hifc_cpu_to_big32(v_buf_addr, buf_len); +} + +void hifc_build_service_wqe_ctx_sge(struct hifc_root_sqe_s *v_rt_sqe, + unsigned long long v_ctxt_addr, + unsigned int buf_len) +{ + /* The SGE is filled in and converted to the big-endian mode. */ + v_rt_sqe->ctx_sge.buf_addr_hi = HIFC_HIGH_32_BITS(v_ctxt_addr); + v_rt_sqe->ctx_sge.buf_addr_lo = HIFC_LOW_32_BITS(v_ctxt_addr); + v_rt_sqe->ctx_sge.wd0.buf_len = buf_len; + v_rt_sqe->ctx_sge.wd0.ext_flag = 0; + v_rt_sqe->ctx_sge.wd1.rsvd = 0; + + hifc_cpu_to_big32(&v_rt_sqe->ctx_sge, sizeof(v_rt_sqe->ctx_sge)); +} + +void hifc_build_els_wqe_root_offload(struct hifc_root_sqe_s *v_rt_sqe, + dma_addr_t ctxt_addr, + unsigned int xid) +{ + /* update Task Section DW0.OFFLOAD */ + v_rt_sqe->task_section.fc_dw0.off_load = HIFC_HAVE_OFFLOAD; + + /* update Context GPA DW1~2 */ + v_rt_sqe->task_section.fc_dw1.context_gpa_hi = + HIFC_HIGH_32_BITS(ctxt_addr); + v_rt_sqe->task_section.fc_dw2.context_gpa_lo = + HIFC_LOW_32_BITS(ctxt_addr); + + /* fill Context DW4 */ + v_rt_sqe->task_section.fc_dw4.parent_xid = xid; + v_rt_sqe->task_section.fc_dw4.csize = HIFC_CNTX_SIZE_T_256B; + + /* The sqe of the offload request has two sge. The first is the packet, + * and the second is the ctx. + */ + v_rt_sqe->ctrl_section.ch.wd0.bdsl = + 2 * HIFC_BYTES_TO_QW_NUM(sizeof(struct hifc_root_sge_s)); +} + +void hifc_build_service_wqe_ctrl_section(struct hifcoe_wqe_ctrl_s *v_wqe_cs, + unsigned int ts_size, + unsigned int bdsi) +{ + v_wqe_cs->ch.wd0.bdsl = bdsi; + v_wqe_cs->ch.wd0.drv_sl = 0; + v_wqe_cs->ch.wd0.rsvd0 = 0; + v_wqe_cs->ch.wd0.wf = 0; + v_wqe_cs->ch.wd0.cf = 0; + v_wqe_cs->ch.wd0.tsl = ts_size; + v_wqe_cs->ch.wd0.va = 0; + v_wqe_cs->ch.wd0.df = 0; + v_wqe_cs->ch.wd0.cr = 1; + v_wqe_cs->ch.wd0.dif_sl = 0; + v_wqe_cs->ch.wd0.csl = 0; + /* divided by 8 */ + v_wqe_cs->ch.wd0.ctrl_sl = HIFC_BYTES_TO_QW_NUM(sizeof(*v_wqe_cs)); + v_wqe_cs->ch.wd0.owner = 0; +} + +void hifc_build_wqe_owner_pmsn(struct hifcoe_wqe_ctrl_s *v_wqe_cs, + unsigned short owner, + unsigned short pmsn) +{ + v_wqe_cs->qsf.wqe_sn = pmsn; + v_wqe_cs->qsf.dump_wqe_sn = v_wqe_cs->qsf.wqe_sn; + v_wqe_cs->ch.wd0.owner = (unsigned int)owner; +} + +void hifc_convert_parent_wqe_to_big_endian(struct hifcoe_sqe_s *v_sqe) +{ + if (likely((v_sqe->ts_sl.task_type != HIFCOE_TASK_T_TRESP) && + (v_sqe->ts_sl.task_type != HIFCOE_TASK_T_TMF_RESP))) { + /* + * Convert Control Secton and Task Section to big-endian. Before + * the SGE enters the queue, the upper-layer driver converts the + * SGE and Task Section to the big-endian mode. + */ + hifc_cpu_to_big32(&v_sqe->ctrl_sl, sizeof(v_sqe->ctrl_sl)); + hifc_cpu_to_big32(&v_sqe->ts_sl, sizeof(v_sqe->ts_sl)); + } else { + /* + * The HIFCOE_TASK_T_TRESP may use the SGE as the Task Section + * to convert the entire SQE into a large end. 
+ */ + hifc_cpu_to_big32(v_sqe, sizeof(struct hifcoe_sqe_tresp_s)); + } +} + +void hifc_convert_root_wqe_to_big_endian(struct hifc_root_sqe_s *v_sqe) +{ + hifc_cpu_to_big32(&v_sqe->ctrl_section, sizeof(v_sqe->ctrl_section)); + hifc_cpu_to_big32(&v_sqe->task_section, sizeof(v_sqe->task_section)); +} + +void hifc_build_cmdqe_common(union hifc_cmdqe_u *cmdqe, + enum hifcoe_task_type_e task_type, + unsigned short rx_id) +{ + cmdqe->common.wd0.task_type = task_type; + cmdqe->common.wd0.rx_id = rx_id; + cmdqe->common.wd0.rsvd0 = 0; +} + +#define HIFC_STANDARD_SIRT_ENABLE 1 +#define HIFC_STANDARD_SIRT_DISABLE 0 +#define HIFC_UNKNOWN_ID 0xFFFF + +void hifc_build_icmnd_wqe_ts_header(struct unf_frame_pkg_s *v_pkg, + struct hifcoe_sqe_s *v_sqe, + unsigned char task_type, + unsigned short exit_base, + unsigned char v_port_idx) +{ + v_sqe->ts_sl.local_xid = UNF_GET_OXID(v_pkg) + exit_base; + v_sqe->ts_sl.task_type = task_type; + v_sqe->ts_sl.wd0.conn_id = + (unsigned short)(v_pkg->private[PKG_PRIVATE_XCHG_RPORT_INDEX]); + + v_sqe->ts_sl.wd0.remote_xid = HIFC_UNKNOWN_ID; +} + +void hifc_build_icmnd_wqe_ts(void *v_hba, struct unf_frame_pkg_s *v_pkg, + struct hifcoe_sqe_ts_s *v_sqe_ts) +{ + struct hifcoe_sqe_icmnd_s *icmd = &v_sqe_ts->cont.icmnd; + void *phy_add = NULL; + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_hba; + v_sqe_ts->cdb_type = 0; + memcpy(icmd->fcp_cmnd_iu, v_pkg->fcp_cmnd, + sizeof(struct unf_fcp_cmnd_s)); + + icmd->magic_num = UNF_GETXCHGALLOCTIME(v_pkg); + + if (v_pkg->unf_rsp_pload_bl.buffer_ptr) { + phy_add = (void *)v_pkg->unf_rsp_pload_bl.buf_dma_addr; + icmd->rsp_gpa_hi = HIFC_HIGH_32_BITS(phy_add); + icmd->rsp_gpa_lo = HIFC_LOW_32_BITS(phy_add); + } else { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]INI Build WQE sense buffer should not be null,sid_did (0x%x_0x%x) oxid(0x%x) pkg type(0x%x) hot pool tag(0x%x).", + v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did, + UNF_GET_OXID(v_pkg), + v_pkg->type, UNF_GET_XCHG_TAG(v_pkg)); + } + + if (v_sqe_ts->task_type != HIFC_SQE_FCP_ITMF) { + icmd->info.tmf.w0.bs.reset_exch_start = hba->exit_base; + icmd->info.tmf.w0.bs.reset_exch_end = hba->exit_base + + hba->exit_count - 1; + + icmd->info.tmf.w1.bs.reset_did = UNF_GET_DID(v_pkg); + /* delivers the marker status flag to the microcode. 
*/ + icmd->info.tmf.w1.bs.marker_sts = 1; + HIFC_GET_RESET_TYPE(UNF_GET_TASK_MGMT_FLAGS( + v_pkg->fcp_cmnd->control), + icmd->info.tmf.w1.bs.reset_type); + + icmd->info.tmf.w2.bs.reset_sid = UNF_GET_SID(v_pkg); + + memcpy(icmd->info.tmf.reset_lun, v_pkg->fcp_cmnd->lun, + sizeof(icmd->info.tmf.reset_lun)); + } +} + +void hifc_build_srq_wqe_ctrls(struct hifcoe_rqe_s *v_rqe, + unsigned short owner, + unsigned short pmsn) +{ + struct hifcoe_wqe_ctrl_ch_s *wqe_ctrls = NULL; + + wqe_ctrls = &v_rqe->ctrl_sl.ch; + wqe_ctrls->wd0.owner = owner; + wqe_ctrls->wd0.ctrl_sl = sizeof(struct hifcoe_wqe_ctrl_s) >> 3; + wqe_ctrls->wd0.csl = 1; + wqe_ctrls->wd0.dif_sl = 0; + wqe_ctrls->wd0.cr = 1; + wqe_ctrls->wd0.df = 0; + wqe_ctrls->wd0.va = 0; + wqe_ctrls->wd0.tsl = 0; + wqe_ctrls->wd0.cf = 0; + wqe_ctrls->wd0.wf = 0; + wqe_ctrls->wd0.drv_sl = sizeof(struct hifcoe_rqe_drv_s) >> 3; + wqe_ctrls->wd0.bdsl = sizeof(struct hifcoe_constant_sge_s) >> 3; + + v_rqe->ctrl_sl.wd0.wqe_msn = pmsn; + v_rqe->ctrl_sl.wd0.dump_wqe_msn = v_rqe->ctrl_sl.wd0.wqe_msn; +} diff --git a/drivers/scsi/huawei/hifc/hifc_wqe.h b/drivers/scsi/huawei/hifc/hifc_wqe.h new file mode 100644 index 000000000000..991b3b8ce00e --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_wqe.h @@ -0,0 +1,486 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef __HIFC_WQE_H__ +#define __HIFC_WQE_H__ + +#include "hifcoe_wqe.h" +#include "hifcoe_parent_context.h" + +/* TGT WQE type */ +/* DRV->uCode via Root or Parent SQ */ +#define HIFC_SQE_FCP_TRD HIFCOE_TASK_T_TREAD +#define HIFC_SQE_FCP_TWR HIFCOE_TASK_T_TWRITE +#define HIFC_SQE_FCP_TRSP HIFCOE_TASK_T_TRESP +#define HIFC_SQE_FCP_TACK HIFCOE_TASK_T_TACK +#define HIFC_SQE_ELS_CMND HIFCOE_TASK_T_ELS +#define HIFC_SQE_ELS_RSP HIFCOE_TASK_T_ELS_RSP +#define HIFC_SQE_GS_CMND HIFCOE_TASK_T_GS +#define HIFC_SQE_BLS_CMND HIFCOE_TASK_T_ABTS +#define HIFC_SQE_FCP_IREAD HIFCOE_TASK_T_IREAD +#define HIFC_SQE_FCP_IWRITE HIFCOE_TASK_T_IWRITE +#define HIFC_SQE_FCP_ITMF HIFCOE_TASK_T_ITMF +#define HIFC_SQE_SESS_RST HIFCOE_TASK_T_SESS_RESET +#define HIFC_SQE_FCP_TMF_TRSP HIFCOE_TASK_T_TMF_RESP + +/* DRV->uCode Via CMDQ */ +#define HIFC_CMDQE_ABTS_RSP HIFCOE_TASK_T_ABTS_RSP +#define HIFC_CMDQE_ABORT HIFCOE_TASK_T_ABORT +#define HIFC_CMDQE_SESS_DIS HIFCOE_TASK_T_SESS_DIS +#define HIFC_CMDQE_SESS_DEL HIFCOE_TASK_T_SESS_DEL + +/* uCode->Drv Via CMD SCQ */ +#define HIFC_SCQE_FCP_TCMND HIFCOE_TASK_T_RCV_TCMND +#define HIFC_SCQE_ELS_CMND HIFCOE_TASK_T_RCV_ELS_CMD +#define HIFC_SCQE_ABTS_CMD HIFCOE_TASK_T_RCV_ABTS_CMD +#define HIFC_SCQE_FCP_IRSP HIFCOE_TASK_T_IRESP +#define HIFC_SCQE_FCP_ITMF_RSP HIFCOE_TASK_T_ITMF_RESP + +/* uCode->Drv Via STS SCQ */ +#define HIFC_SCQE_FCP_TSTS HIFCOE_TASK_T_TSTS +#define HIFC_SCQE_GS_RSP HIFCOE_TASK_T_RCV_GS_RSP +#define HIFC_SCQE_ELS_RSP HIFCOE_TASK_T_RCV_ELS_RSP +#define HIFC_SCQE_ABTS_RSP HIFCOE_TASK_T_RCV_ABTS_RSP +#define HIFC_SCQE_ELS_RSP_STS HIFCOE_TASK_T_ELS_RSP_STS +#define HIFC_SCQE_ABTS_RSP_STS HIFCOE_TASK_T_ABTS_RSP_STS +#define HIFC_SCQE_ABORT_STS HIFCOE_TASK_T_ABORT_STS +#define HIFC_SCQE_SESS_EN_STS HIFCOE_TASK_T_SESS_EN_STS +#define HIFC_SCQE_SESS_DIS_STS HIFCOE_TASK_T_SESS_DIS_STS +#define HIFC_SCQE_SESS_DEL_STS HIFCOE_TASK_T_SESS_DEL_STS +#define HIFC_SCQE_SESS_RST_STS HIFCOE_TASK_T_SESS_RESET_STS +#define HIFC_SCQE_ITMF_MARKER_STS HIFCOE_TASK_T_ITMF_MARKER_STS +#define HIFC_SCQE_ABTS_MARKER_STS HIFCOE_TASK_T_ABTS_MARKER_STS +#define HIFC_SCQE_FLUSH_SQ_STS 
HIFCOE_TASK_T_FLUSH_SQ_STS +#define HIFC_SCQE_BUF_CLEAR_STS HIFCOE_TASK_T_BUFFER_CLEAR_STS +#define HIFC_SCQE_CLEAR_SRQ_STS HIFCOE_TASK_T_CLEAR_SRQ_STS + +#define HIFC_LOW_32_BITS(__addr) \ + ((unsigned int)((unsigned long long)(__addr) & 0xffffffff)) +#define HIFC_HIGH_32_BITS(__addr)\ + ((unsigned int)(((unsigned long long)(__addr) >> 32) & 0xffffffff)) + +/* Error Code from SCQ */ +#define HIFC_COMPLETION_STATUS_SUCCESS FCOE_CQE_COMPLETED +#define HIFC_COMPLETION_STATUS_ABORTED_SETUP_FAIL FCOE_IMMI_CMDPKT_SETUP_FAIL + +#define HIFC_COMPLETION_STATUS_TIMEOUT FCOE_ERROR_CODE_E_D_TIMER_EXPIRE +#define HIFC_COMPLETION_STATUS_DIF_ERROR FCOE_ERROR_CODE_DATA_DIFX_FAILED +#define HIFC_COMPLETION_STATUS_DATA_OOO FCOE_ERROR_CODE_DATA_OOO_RO +#define HIFC_COMPLETION_STATUS_DATA_OVERFLOW \ + FCOE_ERROR_CODE_DATA_EXCEEDS_DATA2TRNS + +#define HIFC_SCQE_INVALID_CONN_ID 0xffff +#define HIFC_GET_SCQE_TYPE(scqe) ((scqe)->common.ch.wd0.task_type) +#define HIFC_GET_SCQE_STATUS(scqe) ((scqe)->common.ch.wd0.err_code) +#define HIFC_GET_SCQE_REMAIN_CNT(scqe) ((scqe)->common.ch.wd0.cqe_remain_cnt) +#define HIFC_GET_SCQE_CONN_ID(scqe) ((scqe)->common.conn_id) +#define HIFC_GET_WQE_TYPE(wqe) ((wqe)->ts_sl.task_type) + +#define HIFC_WQE_IS_IO(wqe) \ + (HIFC_GET_WQE_TYPE(wqe) != HIFC_SQE_SESS_RST) + +#define HIFC_SCQE_HAS_ERRCODE(scqe) \ + (HIFC_GET_SCQE_STATUS(scqe) != HIFC_COMPLETION_STATUS_SUCCESS) + +#define HIFC_SCQE_ERR_TO_CM(scqe)\ + (HIFC_GET_SCQE_STATUS(scqe) != FCOE_ELS_GS_RSP_EXCH_CHECK_FAIL) +#define HIFC_SCQE_CONN_ID_VALID(scqe) \ + (HIFC_GET_SCQE_CONN_ID(scqe) != HIFC_SCQE_INVALID_CONN_ID) + +#define HIFC_WQE_SECTION_CHUNK_SIZE 8 /* 8 bytes' chunk */ +#define HIFC_T_RESP_WQE_CTR_TSL_SIZE 15 /* 8 bytes' chunk */ +#define HIFC_T_RD_WR_WQE_CTR_TSL_SIZE 9 /* 8 bytes' chunk */ +#define HIFC_T_RD_WR_WQE_CTR_BDSL_SIZE 4 /* 8 bytes' chunk */ +#define HIFC_T_RD_WR_WQE_CTR_CTRLSL_SIZE 1 /* 8 bytes' chunk */ + +#define HIFC_WQE_SGE_ENTRY_NUM 2 /* BD SGE and DIF SGE count */ +#define HIFC_WQE_SGE_DIF_ENTRY_NUM 1 /* DIF SGE count */ +#define HIFC_WQE_SGE_LAST_FLAG 1 +#define HIFC_WQE_SGE_NOT_LAST_FLAG 0 +#define HIFC_WQE_SGE_EXTEND_FLAG 1 +#define HIFC_WQE_SGE_NOT_EXTEND_FLAG 0 + +#define HIFC_FCP_TMF_PORT_RESET 0 +#define HIFC_FCP_TMF_LUN_RESET 1 +#define HIFC_FCP_TMF_TGT_RESET 2 +#define HIFC_FCP_TMF_RSVD 3 +#define HIFC_NO_OFFLOAD 0 +#define HIFC_HAVE_OFFLOAD 1 +#define HIFC_QID_SQ 0 + +#define HIFC_ADJUST_DATA(old_val, new_val) ((old_val) = (new_val)) + +#define HIFC_GET_RESET_TYPE(tmf_flag, reset_flag) \ + do { \ + switch (tmf_flag) { \ + case UNF_FCP_TM_ABORT_TASK_SET: \ + case UNF_FCP_TM_LOGICAL_UNIT_RESET: \ + reset_flag = HIFC_FCP_TMF_LUN_RESET; \ + break; \ + case UNF_FCP_TM_TARGET_RESET: \ + reset_flag = HIFC_FCP_TMF_TGT_RESET; \ + break; \ + case UNF_FCP_TM_CLEAR_TASK_SET: \ + reset_flag = HIFC_FCP_TMF_PORT_RESET; \ + break; \ + default: \ + reset_flag = HIFC_FCP_TMF_RSVD; \ + } \ + } while (0) + +/* + * nic_wqe_ctrl_sec table define + */ +struct nic_wqe_ctrl_sec { + union { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* marks ownership of WQE */ + u32 owner : 1; + /* Control Section Length */ + u32 ctrl_sec_len : 2; + /* Completion Section Length */ + u32 completion_sec_len : 2; + /* DIF Section Length */ + u32 dif_sec_len : 3; + /* + * Completion is Required - marks CQE generation request + * per WQE + */ + u32 cr : 1; + /* Data Format - format of BDS */ + u32 df : 1; + /* Virtual Address */ + u32 va : 1; + /* Task Section Length */ + u32 task_sec_len : 5; + /* Completion Format */ + u32 cf 
: 1; + u32 wf : 1; + /* reserved */ + u32 rsvd : 4; + /* Driver Section Length */ + u32 drv_sec_len : 2; + /* Buffer Descriptors Section Length */ + u32 buf_desc_sec_len : 8; +#else + /* Buffer Descriptors Section Length */ + u32 buf_desc_sec_len : 8; + /* Driver Section Length */ + u32 drv_sec_len : 2; + /* reserved */ + u32 rsvd : 4; + u32 wf : 1; + /* Completion Format */ + u32 cf : 1; + /* Task Section Length */ + u32 task_sec_len : 5; + /* Virtual Address */ + u32 va : 1; + /* Data Format - format of BDS */ + u32 df : 1; + /* + * Completion is Required - marks CQE generation request + * per WQE + */ + u32 cr : 1; + /* DIF Section Length */ + u32 dif_sec_len : 3; + /* Completion Section Length */ + u32 completion_sec_len : 2; + /* Control Section Length */ + u32 ctrl_sec_len : 2; + /* marks ownership of WQE */ + u32 owner : 1; +#endif + } bs; + + u32 dw; + }; +}; + +/* + * nic_rq_sge_sec table define + */ +struct nic_rq_sge_sec { + /* packet buffer address high */ + u32 wb_addr_high; + /* packet buffer address low */ + u32 wb_addr_low; + + union { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd : 1; + /* SGE length */ + u32 length : 31; +#else + /* SGE length */ + u32 length : 31; + u32 rsvd : 1; +#endif + } bs0; + u32 dw0; + }; + + union { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* 0:list,1:last */ + u32 list : 1; + /* 0:normal,1:pointer to next SGE */ + u32 extension : 1; + /* key or unsed */ + u32 key : 30; +#else + /* key or unsed */ + u32 key : 30; + /* 0:normal,1:pointer to next SGE */ + u32 extension : 1; + /* 0:list,1:last */ + u32 list : 1; +#endif + } bs1; + u32 dw1; + }; +}; + +/* + * nic_rq_bd_sec table define + */ +struct nic_rq_bd_sec { + /* packet buffer address high */ + u32 pkt_buf_addr_high; + /* packet buffer address low */ + u32 pkt_buf_addr_low; +}; + +/* + * nic_rq_wqe table define + */ +struct nic_rq_wqe { + struct nic_wqe_ctrl_sec rq_wqe_ctrl_sec; + u32 rsvd; + struct nic_rq_sge_sec rx_sge; + struct nic_rq_bd_sec pkt_buf_addr; +}; + +/* Link WQE structure */ +struct hifc_link_wqe_s { + union { + struct { + unsigned int rsv1 : 14; + unsigned int wf : 1; + unsigned int rsv2 : 14; + unsigned int ctrlsl : 2; + unsigned int o : 1; + } wd0; + u32 val_wd0; + }; + + union { + struct { + unsigned int msn : 16; + unsigned int dump_msn : 15; + /* lp means whether O bit is overturn */ + unsigned int lp : 1; + } wd1; + unsigned int val_wd1; + }; + + unsigned int next_page_addr_hi; + unsigned int next_page_addr_lo; +}; + +struct hifc_root_rq_complet_info_s { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + unsigned int done : 1; /* done bit,ucode will set to 1 */ + unsigned int rsvd1 : 6; + unsigned int fc_pkt : 1; /* Marks whether the packet is fc type */ + unsigned int rsvd2 : 24; +#else + unsigned int rsvd2 : 24; + unsigned int fc_pkt : 1; /* Marks whether the packet is fc type */ + unsigned int rsvd1 : 6; + unsigned int done : 1; /* done bit,ucode will set to 1 */ +#endif + +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + unsigned short buf_length; + unsigned short exch_id; +#else + unsigned short exch_id; + unsigned short buf_length; +#endif + +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + unsigned short sts_only; /* If only CMPL SECTION */ + unsigned short status; /* 0:no err;!0:others */ +#else + unsigned short status; /* 0:no err;!0:others */ + unsigned short sts_only; /* If only CMPL SECTION */ +#endif + unsigned int magic_num; + unsigned int rsvd[4]; +}; + +/* Parent SQ WQE */ +struct hifc_root_sge_s { + unsigned int buf_addr_hi; + unsigned int buf_addr_lo; 
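+ /*
+  * wd0 below carries the 31-bit SGE length and an extension flag, wd1
+  * is reserved; the whole SGE is converted to big-endian with
+  * hifc_cpu_to_big32() before it is posted (see
+  * hifc_build_service_wqe_root_sge()).
+  */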
+ struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + unsigned int ext_flag : 1; + unsigned int buf_len : 31; +#else + unsigned int buf_len : 31; + unsigned int ext_flag : 1; +#endif + } wd0; + struct { + unsigned int rsvd; + } wd1; +}; + +/* Root SQ WQE Task Section structure for FC */ +struct hifc_root_sqe_task_section_s { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + unsigned int task_type : 8; + /* 1:offload enable,0:offload disable. */ + unsigned int off_load : 1; + unsigned int port_id : 4; + unsigned int host_id : 2; + unsigned int rsvd1 : 1; + unsigned int exch_id : 16; +#else + unsigned int exch_id : 16; + unsigned int rsvd1 : 1; + unsigned int host_id : 2; + unsigned int port_id : 4; + unsigned int off_load : 1; + unsigned int task_type : 8; +#endif + } fc_dw0; + + union { + unsigned int context_gpa_hi; + unsigned int magic_num; + } fc_dw1; + + struct { + unsigned int context_gpa_lo; + } fc_dw2; + + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + unsigned short scq_num; /* SCQ num */ + unsigned short rport_index; /* RPort */ +#else + unsigned short rport_index; /* RPort */ + unsigned short scq_num; /* SCQ num */ +#endif + } fc_dw3; + + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + unsigned int pkt_type : 1; /* pkt type 0:ETH, 1:FC */ + unsigned int pkt_cos : 3; + unsigned int rsvd2 : 1; + unsigned int csize : 2; + unsigned int service_type : 5; + unsigned int parent_xid : 20; +#else + unsigned int parent_xid : 20; + unsigned int service_type : 5; + unsigned int csize : 2; + unsigned int rsvd2 : 1; + unsigned int pkt_cos : 3; /* pkt cos,4:ETH, 0:FC */ + unsigned int pkt_type : 1; /* pkt type 0:ETH, 1:FC */ +#endif + } fc_dw4; + + struct { + unsigned int rsvd; + } fc_dw5; + +}; + +/* Root SQ WQE */ +struct hifc_root_sqe_s { + /* Control Section */ + struct hifcoe_wqe_ctrl_s ctrl_section; + struct hifc_root_sqe_task_section_s task_section; + struct hifc_root_sge_s sge; + struct hifc_root_sge_s ctx_sge; +}; + +/* Parent SQ WQE and Root SQ WQE Related function */ +void hifc_build_service_wqe_ctrl_section(struct hifcoe_wqe_ctrl_s *v_wqe_cs, + unsigned int ts_size, + unsigned int bdsl); +void hifc_build_service_wqe_ts_common(struct hifcoe_sqe_ts_s *v_sqe_ts, + unsigned int rport_index, + unsigned short local_xid, + unsigned short remote_xid, + unsigned short data_len); +void hifc_build_els_gs_wqe_sge(struct hifcoe_sqe_s *v_sqe, void *v_buf_addr, + unsigned long long v_phyaddr, + unsigned int buf_len, + unsigned int xid, void *v_hba); +void hifc_build_els_wqe_ts_req(struct hifcoe_sqe_s *v_sqe, + void *v_sq_info, unsigned short cmnd, + unsigned int v_scqn, void *v_frame_pld); + +void hifc_build_els_wqe_ts_rsp(struct hifcoe_sqe_s *v_sqe, void *v_sq_info, + void *v_frame_pld, unsigned short type, + unsigned short cmnd, unsigned int v_scqn); +void hifc_build_els_wqe_ts_magic_num(struct hifcoe_sqe_s *v_sqe, + unsigned short els_cmnd_type, + unsigned int v_magic_num); +void hifc_build_gs_wqe_ts_req(struct hifcoe_sqe_s *v_sqe, + unsigned int magic_num); +void hifc_build_bls_wqe_ts_req(struct hifcoe_sqe_s *v_sqe, + unsigned int abts_param, + unsigned int magic_num); +void hifc_build_service_wqe_root_ts(void *v_hba, + struct hifc_root_sqe_s *v_rt_sqe, + unsigned int rx_id, unsigned int rport_id, + unsigned int scq_num); +void hifc_build_service_wqe_root_sge(struct hifc_root_sqe_s *v_rt_sqe, + void *v_buf_addr, + unsigned long long v_phyaddr, + unsigned int buf_len, + void *v_hba); +void hifc_build_els_wqe_root_offload(struct hifc_root_sqe_s *v_rt_sqe, + dma_addr_t ctx_addr, + 
unsigned int xid); +void hifc_build_wqe_owner_pmsn(struct hifcoe_wqe_ctrl_s *v_wqe_cs, + unsigned short owner, + unsigned short pmsn); +void hifc_convert_parent_wqe_to_big_endian(struct hifcoe_sqe_s *v_sqe); +void hifc_convert_root_wqe_to_big_endian(struct hifc_root_sqe_s *v_sqe); +void hifc_build_icmnd_wqe_ts(void *v_hba, struct unf_frame_pkg_s *v_pkg, + struct hifcoe_sqe_ts_s *v_sqe_ts); +void hifc_build_icmnd_wqe_ts_header(struct unf_frame_pkg_s *v_pkg, + struct hifcoe_sqe_s *v_sqe, + unsigned char v_task_type, + unsigned short v_exi_base, + unsigned char v_port_idx); +void hifc_build_cmdqe_common(union hifc_cmdqe_u *cmdqe, + enum hifcoe_task_type_e task_type, + unsigned short rx_id); +void hifc_build_srq_wqe_ctrls(struct hifcoe_rqe_s *v_rqe, unsigned short owner, + unsigned short pmsn); +void hifc_build_common_wqe_ctrls(struct hifcoe_wqe_ctrl_s *v_ctrl_sl, + unsigned char v_task_len); +void hifc_build_service_wqe_ctx_sge(struct hifc_root_sqe_s *v_rt_sqe, + unsigned long long v_ctx_addr, + unsigned int buf_len); +void hifc_build_trd_twr_wqe_ctrls(struct unf_frame_pkg_s *v_pkg, + struct hifcoe_sqe_s *v_sqe); + +#endif diff --git a/drivers/scsi/huawei/hifc/hifcoe_parent_context.h b/drivers/scsi/huawei/hifc/hifcoe_parent_context.h new file mode 100644 index 000000000000..91673338dd4a --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifcoe_parent_context.h @@ -0,0 +1,414 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __HIFCOE_PARENT_CONTEXT_H__ +#define __HIFCOE_PARENT_CONTEXT_H__ + +enum fc_parent_status_e { + FCOE_PARENT_STATUS_INVALID = 0, + FCOE_PARENT_STATUS_NORMAL, + FCOE_PARENT_STATUS_CLOSING +}; + +#define HIFCOE_DOUBLE_SGL (1) +#define HIFCOE_SINGLE_SGL (0) + +#define HIFCOE_DIX_ALGORITHM_IP (1) +#define HIFCOE_DIX_ALGORITHM_CRC (0) + +#define HIFCOE_PARENT_CONTEXT_KEY_ALIGN_SIZE (48) + +#define HIFCOE_PARENT_CONTEXT_SRQ_QINFO_SIZE (8) +#define HIFCOE_PARENT_CONTEXT_TIMER_SIZE (32) /* 24+2*N,N=timer count */ +#define HIFCOE_RQ_FILLED_OFFSET \ + ((u8)(u32)& \ + (((struct hifcoe_sw_section_s *)0x0)->occupy_by_rqe_filled_flag)) +#define HIFCOE_RW_LOCK_AREA_OFFSET \ + ((u8)(u32)&\ + (((struct hifcoe_sw_section_s *)0x0)->occupy_by_rw_lock_area)) + +/* "fqg_level_eventiq_info_s" should be care if MAX_EVENTIQ_LEVEL is larger + * than 4 + */ +#define MAX_EVENTIQ_LEVEL 4 +#define MAX_EVENTIQ_LEVEL_SHIFT 2 + +#define SP_FEATRUE_EDTR 0x1 +#define SP_FEATRUE_SEQ_CNT 0x2 + +#define MAX_PKT_SIZE_PER_DISPATCH (FC_PARENT_P->per_xmit_data_size) +#define MAX_PKT_SIZE_PER_DISPATCH_DIF_4K \ + (MAX_PKT_SIZE_PER_DISPATCH + ((MAX_PKT_SIZE_PER_DISPATCH >> 12) << 3)) +#define MAX_PKT_SIZE_PER_DISPATCH_DIF_512B \ + (MAX_PKT_SIZE_PER_DISPATCH + ((MAX_PKT_SIZE_PER_DISPATCH >> 9) << 3)) +#define MAX_PKT_SIZE_PER_DISPATCH_DIF(shift) \ + (MAX_PKT_SIZE_PER_DISPATCH +\ + ((u32)((MAX_PKT_SIZE_PER_DISPATCH >> 9) >> (shift)) << 3)) + +/* immidiate data DIF info definition in parent context */ +struct immi_dif_info_s { + union { + u32 value; + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 pdu_difx_cnt :8; + u32 sct_size :1;/* Sector size, 1: 4K; 0: 512 */ + u32 dif_verify_type :2; /* verify type */ + u32 dif_ins_rep_type:2; /* ins&rep type */ + u32 io_1st_pdu :1; + /* Check blocks whose application tag contains + * 0xFFFF flag + */ + u32 difx_app_esc :1; + u32 difx_ref_esc :1; + /* + * Check blocks whose reference tag contains 0xFFFF flag + */ + u32 grd_ctrl :3; /* The DIF/DIX Guard control */ + /* Bit 0: 
DIF/DIX guard verify algorithm control */ + u32 grd_agm_ctrl :2; + /* + * Bit 1: DIF/DIX guard replace or insert algorithm + * control + */ + u32 grd_agm_ini_ctrl :3; + /* The DIF/DIX Reference tag control */ + u32 ref_tag_ctrl :3; + /* Bit 0: scenario of the reference tag verify mode */ + u32 ref_tag_mode :2; + /* + * Bit 1: scenario of the reference tag insert/replace + * mode + */ + /* 0: fixed; 1: increasement;*/ + u32 app_tag_ctrl :3; /* DIF/DIX APP TAG Control */ + +#else + u32 app_tag_ctrl :3; /* DIF/DIX APP TAG Control */ + /* Bit 0: scenario of the reference tag verify mode */ + u32 ref_tag_mode :2; + /* + * Bit 1: scenario of the reference tag insert/replace + * mode + */ + /* 0: fixed; 1: increasement;*/ + /* The DIF/DIX Reference tag control */ + u32 ref_tag_ctrl :3; + u32 grd_agm_ini_ctrl :3; + /* Bit 0: DIF/DIX guard verify algorithm control */ + u32 grd_agm_ctrl :2; + /* + * Bit 1: DIF/DIX guard replace or insert algorithm + * control + */ + u32 grd_ctrl :3; /* The DIF/DIX Guard control */ + /* + * Check blocks whose reference tag contains 0xFFFF flag + */ + u32 difx_ref_esc :1; + /* + * Check blocks whose application tag contains 0xFFFF + * flag + */ + u32 difx_app_esc :1; + u32 io_1st_pdu :1; + u32 dif_ins_rep_type:2; /* ins&rep type */ + u32 dif_verify_type :2; /* verify type */ + u32 sct_size :1; /* Sector size, 1: 4K; 0: 512 */ + u32 pdu_difx_cnt :8; + +#endif + } info; + } dif_dw3; + + union { + u32 value; + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 difx_len :11; /* DIF/DIFX total length */ + u32 difx_en :1; /* DIF/DIFX enable flag */ + u32 rsv0 :4; + u32 dif_cnt :16; +#else + u32 dif_cnt :16; + u32 rsv0 :4; + u32 difx_en :1; /* DIF/DIFX enable flag */ + u32 difx_len :11; /* DIF/DIFX total length */ +#endif + } info; + } dif_other; + + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rep_app_tag :16; + u32 cmp_app_tag :16; + #else + u32 cmp_app_tag :16; + u32 rep_app_tag :16; + #endif + /* + * The ref tag value for verify compare, do not support replace or + * insert ref tag + */ + u32 cmp_ref_tag; + +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 cmp_app_tag_msk :16; + u32 rsv1 :16; +#else + u32 rsv1 :16; + u32 cmp_app_tag_msk :16; +#endif +}; + +/* parent context SW section definition: SW(80B) */ +struct hifcoe_sw_section_s { + /* RO fields */ + u32 scq_num_rcv_cmd; /* scq number used for cmd receive */ + +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 xid; /* driver init */ +#else + struct { + u32 xid :13; + u32 vport :7; + u32 csctrl :8; + u32 rsvd0 :4; + } sw_ctxt_vport_xid; +#endif + u32 cid; /* ucode init */ + + u16 conn_id; + u16 immi_rq_page_size; + + u16 immi_taskid_min; + u16 immi_taskid_max; + +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 vlan_id : 16; /* Vlan ID */ + /* phycial port to receive and transmit packet. */ + u32 port_id : 4; + /* + * new srq offset. Ucode use new srq to receive els/gs with big payload. 
+ */ + u32 rsvd1 : 5; + u32 srr_support : 2; /* sequence retransmition support flag */ + u32 srv_type : 5; +#else + union { + u32 pctxt_val0; + struct { + u32 srv_type : 5; /* driver init */ + /* sequence retransmition support flag */ + u32 srr_support : 2; + u32 rsvd1 : 5; + u32 port_id : 4; /* driver init */ + u32 vlan_id : 16; /* driver init */ + } dw; + } sw_ctxt_misc; +#endif + + u16 oqid_rd; + u16 oqid_wr; + u32 per_xmit_data_size; + + /* RW fields */ + u32 cmd_scq_gpa_h; + u32 cmd_scq_gpa_l; + /* E_D_TOV timer value: value should be set on ms by driver */ + u32 e_d_tov_timer_val; + /* + * mfs unalined bytes of per 64KB dispatch; equal to + * "MAX_PKT_SIZE_PER_DISPATCH%info->parent->tx_mfs" + */ + u16 mfs_unaligned_bytes; + u16 tx_mfs; /* remote port max receive fc payload length */ + /* max data len allowed in xfer_rdy dis scenario*/ + u32 xfer_rdy_dis_max_len_remote; + u32 xfer_rdy_dis_max_len_local; + +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* Double or single SGL, 1: double; 0: single */ + u32 sgl_num :1; + u32 write_xfer_rdy :1; /* WRITE Xfer_Rdy disable or enable */ + u32 rec_support :1; /* REC support flag */ + u32 conf_support :1; /* Response confirm support flag */ + u32 vlan_enable :1; /* Vlan enable flag */ + u32 e_d_tov :1; /* E_D_TOV Resolution, 0: ms, 1: us*/ + /* seq_cnt, 1: increament support, 0: increament not support */ + u32 seq_cnt :1; + /* 0:Target, 1:Initiator, 2:Target&Initiator */ + u32 work_mode :2; + /* used for parent context cache Consistency judgment,1: done*/ + u32 flush_done :1; + u32 oq_cos_cmd :3; /* esch oq cos for cmd/xferrdy/rsp */ + u32 oq_cos_data :3; /* esch oq cos for data */ + u32 cos :3; /* doorbell cos value */ + u32 status :8; /* status of flow*/ + u32 rsvd4 :2; + u32 priority :3; /* vlan priority */ +#else + union { + struct { + u32 priority : 3; /* vlan priority */ + u32 rsvd4 : 2; + u32 status : 8; /* status of flow*/ + u32 cos : 3; /* doorbell cos value */ + u32 oq_cos_data : 3; /* esch oq cos for data */ + /* esch oq cos for cmd/xferrdy/rsp */ + u32 oq_cos_cmd : 3; + /* + * used for parent context cache Consistency judgment, + * 1: done + */ + u32 flush_done : 1; + /* 0:Target, 1:Initiator, 2:Target&Initiator */ + u32 work_mode : 2; + u32 seq_cnt : 1; /* seq_cnt */ + u32 e_d_tov : 1; /* E_D_TOV resolution */ + u32 vlan_enable : 1; /* Vlan enable flag */ + /* Response confirm support flag */ + u32 conf_support : 1; + u32 rec_support : 1; /* REC support flag */ + /* WRITE Xfer_Rdy disable or enable */ + u32 write_xfer_rdy : 1; + /* Double or single SGL, 1: double; 0: single */ + u32 sgl_num : 1; + } dw; + u32 pctxt_val1; + } sw_ctxt_config; +#endif + /* immidiate data dif control info(20B) */ + struct immi_dif_info_s immi_dif_info; +}; + +struct hifcoe_hw_rsvd_queue_s { + /* bitmap[0]:255-192 */ + /* bitmap[1]:191-128 */ + /* bitmap[2]:127-64 */ + /* bitmap[3]:63-0 */ + u64 seq_id_bitmap[4]; + struct { + u64 last_req_seq_id : 8; + u64 xid : 20; + u64 rsvd0 : 36; + } wd0; +}; + +struct hifcoe_sq_qinfo_s { + u64 rsvd_0 : 10; + /* 0: get pmsn from queue header; 1: get pmsn from ucode */ + u64 pmsn_type : 1; + u64 rsvd_1 : 4; + u64 cur_wqe_o : 1; /* should be opposite from loop_o */ + u64 rsvd_2 : 48; + + u64 cur_sqe_gpa; + u64 pmsn_gpa; /* sq's queue header gpa */ + + u64 sqe_dmaattr_idx : 6; + u64 sq_so_ro : 2; + u64 rsvd_3 : 2; + u64 ring : 1; /* 0: link; 1: ring */ + u64 loop_o : 1; /* init to be the first round o-bit */ + u64 rsvd_4 : 4; + u64 zerocopy_dmaattr_idx : 6; + u64 zerocopy_so_ro : 2; + u64 parity : 8; + u64 rsvd_5 : 26; + 
u64 pcie_template : 6; +}; + +struct hifcoe_cq_qinfo_s { + u64 pcie_template_hi : 3; + u64 parity_2 : 1; + u64 cur_cqe_gpa : 60; + + u64 pi : 15; + u64 pi_o : 1; + u64 ci : 15; + u64 ci_o : 1; + /* if init_mode = 2, is msi/msi-x; other the low-5-bit means c_eqn */ + u64 c_eqn_msi_x : 10; + u64 parity_1 : 1; + /* 0: get ci from queue header; 1: get ci from ucode */ + u64 ci_type : 1; + u64 cq_depth : 3; /* valid when ring = 1 */ + u64 armq : 1; /* 0: IDLE state; 1: NEXT state */ + u64 cur_cqe_cnt : 8; + u64 cqe_max_cnt : 8; + + u64 cqe_dmaattr_idx : 6; + u64 cq_so_ro : 2; + u64 init_mode : 2; /* 1: armQ; 2: msi/msi-x; others: rsvd */ + u64 next_o : 1; /* next pate valid o-bit */ + u64 loop_o : 1; /* init to be the first round o-bit */ + u64 next_cq_wqe_page_gpa : 52; + + u64 pcie_template_lo : 3; + u64 parity_0 : 1; + u64 ci_gpa : 60; /* cq's queue header gpa */ +}; + +struct hifcoe_scq_qinfo_s { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + union { + struct { + u64 parity : 6; + u64 rq_th2_preld_cache_num : 5; + u64 rq_th1_preld_cache_num : 5; + u64 rq_th0_preld_cache_num : 5; + u64 rq_min_preld_cache_num : 4; + u64 sq_th2_preld_cache_num : 5; + u64 sq_th1_preld_cache_num : 5; + u64 sq_th0_preld_cache_num : 5; + u64 sq_min_preld_cache_num : 4; + u64 scq_n : 20; /* scq number */ + } info; + + u64 pctxt_val1; + } hw_scqc_config; +#else + union { + struct { + u64 scq_n : 20; /* scq number */ + u64 sq_min_preld_cache_num : 4; + u64 sq_th0_preld_cache_num : 5; + u64 sq_th1_preld_cache_num : 5; + u64 sq_th2_preld_cache_num : 5; + u64 rq_min_preld_cache_num : 4; + u64 rq_th0_preld_cache_num : 5; + u64 rq_th1_preld_cache_num : 5; + u64 rq_th2_preld_cache_num : 5; + u64 parity : 6; + } info; + + u64 pctxt_val1; + } hw_scqc_config; +#endif +}; + +struct hifcoe_srq_qinfo_s { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u64 srqc_gpa : 60; + u64 parity : 4; +#else + u64 parity : 4; + u64 srqc_gpa : 60; +#endif +}; + +/* here is the layout of service type 12/13 */ +struct hifcoe_parent_context_s { + u8 key[HIFCOE_PARENT_CONTEXT_KEY_ALIGN_SIZE]; + struct hifcoe_scq_qinfo_s resp_scq_qinfo; + struct hifcoe_srq_qinfo_s imm_srq_info; + struct hifcoe_sq_qinfo_s sq_qinfo; + u8 timer_section[HIFCOE_PARENT_CONTEXT_TIMER_SIZE]; + struct hifcoe_hw_rsvd_queue_s hw_rsvdq; + struct hifcoe_srq_qinfo_s els_srq_info; + struct hifcoe_sw_section_s sw_section; +}; + +#endif diff --git a/drivers/scsi/huawei/hifc/hifcoe_wqe.h b/drivers/scsi/huawei/hifc/hifcoe_wqe.h new file mode 100644 index 000000000000..442a52be4580 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifcoe_wqe.h @@ -0,0 +1,1698 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __HIFCOE_WQE_H__ +#define __HIFCOE_WQE_H__ + +/* + * TASK TYPE: in order to compatible wiht EDA, please add new type before BUTT. 
+ */ +enum hifcoe_task_type_e { + HIFCOE_TASK_T_EMPTY = 0,/* SCQE TYPE: means task type not initialize */ + + HIFCOE_TASK_T_IWRITE = 1, /* SQE TYPE: ini send FCP Write Command */ + HIFCOE_TASK_T_IREAD = 2,/* SQE TYPE: ini send FCP Read Command */ + /* SCQE TYPE: ini recv fcp rsp for IREAD/IWRITE/ITMF*/ + HIFCOE_TASK_T_IRESP = 3, + HIFCOE_TASK_T_TCMND = 4,/* NA */ + HIFCOE_TASK_T_TREAD = 5,/* SQE TYPE: tgt send FCP Read Command */ + /* SQE TYPE: tgt send FCP Write Command (XFER_RDY) */ + HIFCOE_TASK_T_TWRITE = 6, + HIFCOE_TASK_T_TRESP = 7,/* SQE TYPE: tgt send fcp rsp of Read/Write*/ + HIFCOE_TASK_T_TSTS = 8, /* SCQE TYPE: tgt sts for TREAD/TWRITE/TRESP*/ + HIFCOE_TASK_T_ABTS = 9, /* SQE TYPE: ini send abts request Command */ + HIFCOE_TASK_T_IELS = 10,/* NA */ + HIFCOE_TASK_T_ITMF = 11,/* SQE TYPE: ini send tmf request Command */ + HIFCOE_TASK_T_CLEAN_UP = 12,/* NA */ + HIFCOE_TASK_T_CLEAN_UP_ALL = 13,/* NA */ + HIFCOE_TASK_T_UNSOLICITED = 14, /* NA */ + HIFCOE_TASK_T_ERR_WARN = 15,/* NA */ + HIFCOE_TASK_T_SESS_EN = 16, /* CMDQ TYPE: enable session */ + HIFCOE_TASK_T_SESS_DIS = 17,/* NA */ + HIFCOE_TASK_T_SESS_DEL = 18,/* NA */ + HIFCOE_TASK_T_RQE_REPLENISH = 19, /* NA */ + + HIFCOE_TASK_T_RCV_TCMND = 20, /* SCQE TYPE: tgt recv fcp cmd */ + HIFCOE_TASK_T_RCV_ELS_CMD = 21, /* SCQE TYPE: tgt recv els cmd */ + HIFCOE_TASK_T_RCV_ABTS_CMD = 22,/* SCQE TYPE: tgt recv abts cmd */ + /* SCQE TYPE: tgt recv immidiate data */ + HIFCOE_TASK_T_RCV_IMMIDIATE = 23, + /* + * SQE TYPE: send ESL rsp. PLOGI_ACC, PRLI_ACC will carry the parent + * context parameter indication. + */ + + HIFCOE_TASK_T_ELS_RSP = 24, + HIFCOE_TASK_T_ELS_RSP_STS = 25, /* SCQE TYPE: ELS rsp sts */ + + HIFCOE_TASK_T_ABTS_RSP = 26,/* CMDQ TYPE: tgt send abts rsp */ + HIFCOE_TASK_T_ABTS_RSP_STS = 27,/* SCQE TYPE: tgt abts rsp sts*/ + + HIFCOE_TASK_T_ABORT = 28, /* CMDQ TYPE: tgt send Abort Command */ + HIFCOE_TASK_T_ABORT_STS = 29, /* SCQE TYPE: Abort sts */ + + HIFCOE_TASK_T_ELS = 30, /* SQE TYPE: send ELS request Command */ + HIFCOE_TASK_T_RCV_ELS_RSP = 31, /* SCQE TYPE: recv ELS response */ + + HIFCOE_TASK_T_GS = 32, /* SQE TYPE: send GS request Command */ + HIFCOE_TASK_T_RCV_GS_RSP = 33, /* SCQE TYPE: recv GS response */ + + HIFCOE_TASK_T_SESS_EN_STS = 34, /* SCQE TYPE: enable session sts */ + HIFCOE_TASK_T_SESS_DIS_STS = 35,/* NA */ + HIFCOE_TASK_T_SESS_DEL_STS = 36,/* NA */ + + HIFCOE_TASK_T_RCV_ABTS_RSP = 37,/* SCQE TYPE: ini recv abts rsp */ + + HIFCOE_TASK_T_BUFFER_CLEAR = 38,/* CMDQ TYPE: Buffer Clear */ + HIFCOE_TASK_T_BUFFER_CLEAR_STS = 39,/* SCQE TYPE: Buffer Clear sts */ + HIFCOE_TASK_T_FLUSH_SQ = 40,/* CMDQ TYPE: flush sq */ + HIFCOE_TASK_T_FLUSH_SQ_STS = 41,/* SCQE TYPE: flush sq sts */ + + HIFCOE_TASK_T_SESS_RESET = 42, /* SQE TYPE: Reset session */ + HIFCOE_TASK_T_SESS_RESET_STS = 43, /* SCQE TYPE: Reset session sts */ + HIFCOE_TASK_T_RQE_REPLENISH_STS = 44, /* NA */ + HIFCOE_TASK_T_DUMP_EXCH = 45, /* CMDQ TYPE: dump exch */ + HIFCOE_TASK_T_INIT_SRQC = 46, /* CMDQ TYPE: init SRQC */ + HIFCOE_TASK_T_CLEAR_SRQ = 47, /* CMDQ TYPE: clear SRQ */ + HIFCOE_TASK_T_CLEAR_SRQ_STS = 48, /* SCQE TYPE: clear SRQ sts */ + HIFCOE_TASK_T_INIT_SCQC = 49, /* CMDQ TYPE: init SCQC */ + HIFCOE_TASK_T_DEL_SCQC = 50,/* CMDQ TYPE: delete SCQC */ + HIFCOE_TASK_T_TMF_RESP = 51,/* SQE TYPE: tgt send tmf rsp */ + HIFCOE_TASK_T_DEL_SRQC = 52,/* CMDQ TYPE: delete SRQC */ + /* SCQE TYPE: tgt recv continue immidiate data */ + HIFCOE_TASK_T_RCV_IMMI_CONTINUE = 53, + HIFCOE_TASK_T_ITMF_RESP = 54, /* SCQE TYPE: ini recv tmf rsp */ + 
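+ /*
+  * Types 55-59 below cover the marker status, target ACK, AEQ error
+  * report and FLR clean-up cases; any new task type must be added
+  * before HIFCOE_TASK_T_BUTT.
+  */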
HIFCOE_TASK_T_ITMF_MARKER_STS = 55,/* SCQE TYPE: tmf marker sts */ + HIFCOE_TASK_T_TACK = 56, + HIFCOE_TASK_T_SEND_AEQERR = 57, + HIFCOE_TASK_T_ABTS_MARKER_STS = 58,/* SCQE TYPE: abts marker sts */ + HIFCOE_TASK_T_FLR_CLEAR_IO = 59,/* FLR clear io type*/ + HIFCOE_TASK_T_BUTT +}; + +/* + * error code for error report + */ +enum hifcoe_err_code_e { + FCOE_CQE_COMPLETED = 0, /* Successful */ + FCOE_SESS_HT_INSERT_FAIL = 1,/* Offload fail: hash insert fail */ + FCOE_SESS_HT_INSERT_DUPLICATE = 2, /* Offload fail: duplicate offload */ + FCOE_SESS_HT_BIT_SET_FAIL = 3, /* Offload fail: bloom filter set fail */ + /* Offload fail: hash delete fail(duplicate delete) */ + FCOE_SESS_HT_DELETE_FAIL = 4, + FCOE_CQE_BUFFER_CLEAR_IO_COMPLETED = 5, /* IO done in buffer clear */ + /* IO done in session rst mode=1 */ + FCOE_CQE_SESSION_ONLY_CLEAR_IO_COMPLETED = 6, + /* IO done in session rst mode=3 */ + FCOE_CQE_SESSION_RST_CLEAR_IO_COMPLETED = 7, + FCOE_CQE_TMF_RSP_IO_COMPLETED = 8, /* IO done in tgt tmf rsp */ + FCOE_CQE_TMF_IO_COMPLETED = 9, /* IO done in ini tmf */ + FCOE_CQE_DRV_ABORT_IO_COMPLETED = 10,/* IO done in tgt abort */ + /* IO done in fcp rsp process. Used for the sceanrio: + * 1.abort before cmd + * 2.send fcp rsp directly after recv cmd + */ + FCOE_CQE_DRV_ABORT_IO_IN_RSP_COMPLETED = 11, + /* IO done in fcp cmd process. Used for the sceanrio: + * 1.abort before cmd + * 2.child setup fail + */ + FCOE_CQE_DRV_ABORT_IO_IN_CMD_COMPLETED = 12, + FCOE_CQE_WQE_FLUSH_IO_COMPLETED = 13,/* IO done in FLUSH SQ */ + /* fcp data format check: DIFX check error */ + FCOE_ERROR_CODE_DATA_DIFX_FAILED = 14, + /* fcp data format check: task_type is not read */ + FCOE_ERROR_CODE_DATA_TASK_TYPE_INCORRECT = 15, + /* fcp data format check: data offset is not continuous */ + FCOE_ERROR_CODE_DATA_OOO_RO = 16, + /* fcp data format check: data is over run */ + FCOE_ERROR_CODE_DATA_EXCEEDS_DATA2TRNS = 17, + /* fcp rsp format check: payload is too short */ + FCOE_ERROR_CODE_FCP_RSP_INVALID_LENGTH_FIELD = 18, + /* fcp rsp format check: fcp_conf need, but exch don't hold seq + * initiative + */ + FCOE_ERROR_CODE_FCP_RSP_CONF_REQ_NOT_SUPPORTED_YET = 19, + /* fcp rsp format check: fcp_conf is required, but it's the last seq */ + FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ = 20, + /* xfer rdy format check: payload is too short */ + FCOE_ERROR_CODE_XFER_INVALID_PAYLOAD_SIZE = 21, + /* xfer rdy format check: last data out havn't finished */ + FCOE_ERROR_CODE_XFER_PEND_XFER_SET = 22, + /* xfer rdy format check: data offset is not continuous */ + FCOE_ERROR_CODE_XFER_OOO_RO = 23, + /* xfer rdy format check: burst len is 0 */ + FCOE_ERROR_CODE_XFER_NULL_BURST_LEN = 24, + FCOE_ERROR_CODE_REC_TIMER_EXPIRE = 25, /* Timer expire: REC_TIMER */ + FCOE_ERROR_CODE_E_D_TIMER_EXPIRE = 26, /* Timer expire: E_D_TIMER */ + FCOE_ERROR_CODE_ABORT_TIMER_EXPIRE = 27,/* Timer expire: Abort timer */ + /* Abort IO magic number mismatch */ + FCOE_ERROR_CODE_ABORT_MAGIC_NUM_NOT_MATCH = 28, + /* RX immidiate data cmd pkt child setup fail */ + FCOE_IMMI_CMDPKT_SETUP_FAIL = 29, + /* RX fcp data sequence id not equal */ + FCOE_ERROR_CODE_DATA_SEQ_ID_NOT_EQUAL = 30, + FCOE_ELS_GS_RSP_EXCH_CHECK_FAIL = 31,/* ELS/GS exch info check fail */ + FCOE_CQE_ELS_GS_SRQE_GET_FAIL = 32, /* ELS/GS process get SRQE fail */ + FCOE_CQE_DATA_DMA_REQ_FAIL = 33, /* SMF soli-childdma rsp error */ + FCOE_CQE_SESSION_CLOSED = 34,/* Session is closed */ + FCOE_SCQ_IS_FULL = 35, /* SCQ is full */ + FCOE_SRQ_IS_FULL = 36, /* SRQ is full */ + FCOE_ERROR_DUCHILDCTX_SETUP_FAIL = 37, /* dpchild 
ctx setup fail */ + FCOE_ERROR_INVALID_TXMFS = 38, /* invalid txmfs */ + /* offload fail,lack of SCQE,through AEQ */ + FCOE_ERROR_OFFLOAD_LACKOF_SCQE_FAIL = 39, + FCOE_ERROR_INVALID_TASK_ID = 40, /* tx invlaid task id */ + FCOE_ERROR_INVALID_PKT_LEN = 41, /* tx els gs pakcet len check */ + FCOE_CQE_ELS_GS_REQ_CLR_IO_COMPLETED = 42, /* IO done in els gs tx */ + FCOE_CQE_ELS_RSP_CLR_IO_COMPLETED = 43, /* IO done in els rsp tx */ + FCOE_ERROR_CODE_RESID_UNDER_ERR = 44 /* FCP RSP RESID ERROR */ +}; + +/* AEQ EVENT TYPE */ +enum hifcoe_aeq_evt_type_e { + /* + * SCQ and SRQ not enough, HOST will initiate a operation to associated + * SCQ/SRQ + */ + FC_AEQ_EVENT_QUEUE_ERROR = 48, + /* WQE MSN check error,HOST will reset port */ + FC_AEQ_EVENT_WQE_FATAL_ERROR = 49, + /* serious chip error, HOST will reset chip */ + FC_AEQ_EVENT_CTX_FATAL_ERROR = 50, + FC_AEQ_EVENT_OFFLOAD_ERROR = 51, + + FC_FC_AEQ_EVENT_TYPE_LAST +}; + +enum hifcoe_aeq_evt_err_code_e { + /* detail type of resource lack */ + FC_SCQ_IS_FULL_ERR = 0, + FC_SRQ_IS_FULL_ERR, + + /* detail type of FC_AEQ_EVENT_WQE_FATAL_ERROR */ + FC_SQE_CHILD_SETUP_WQE_MSN_ERR = 2, + FC_SQE_CHILD_SETUP_WQE_GPA_ERR, + FC_CMDPKT_CHILD_SETUP_INVALID_WQE_ERR_1, + FC_CMDPKT_CHILD_SETUP_INVALID_WQE_ERR_2, + FC_CLEAEQ_WQE_ERR, + FC_WQEFETCH_WQE_MSN_ERR, + FC_WQEFETCH_QUINFO_ERR, + + /* detail type of FC_AEQ_EVENT_CTX_FATAL_ERROR */ + FC_SCQE_ERR_BIT_ERR = 9, + FC_UPDMA_ADDR_REQ_SRQ_ERR, + FC_SOLICHILDDMA_ADDR_REQ_ERR, + FC_UNSOLICHILDDMA_ADDR_REQ_ERR, + FC_SQE_CHILD_SETUP_QINFO_ERR_1, + FC_SQE_CHILD_SETUP_QINFO_ERR_2, + FC_CMDPKT_CHILD_SETUP_QINFO_ERR_1, + FC_CMDPKT_CHILD_SETUP_QINFO_ERR_2, + FC_CMDPKT_CHILD_SETUP_PMSN_ERR, + FC_CLEAEQ_CTX_ERR, + FC_WQEFETCH_CTX_ERR, + FC_FLUSH_QPC_ERR_LQP, + FC_FLUSH_QPC_ERR_SMF, + FC_PREFETCH_QPC_ERR_1, + FC_PREFETCH_QPC_ERR_2, + FC_PREFETCH_QPC_ERR_3, + FC_PREFETCH_QPC_ERR_4, + FC_PREFETCH_QPC_ERR_5, + FC_PREFETCH_QPC_ERR_6, + FC_PREFETCH_QPC_ERR_7, + FC_PREFETCH_QPC_ERR_8, + FC_PREFETCH_QPC_ERR_9, + FC_PREFETCH_QPC_ERR_10, + FC_PREFETCH_QPC_ERR_11, + FC_PREFETCH_QPC_ERR_DEFAULT, + FC_CHILDHASH_INSERT_SW_ERR, + FC_CHILDHASH_LOOKUP_SW_ERR, + FC_CHILDHASH_DEL_SW_ERR, + FC_FLOWHASH_INSERT_SW_ERR, + FC_FLOWHASH_LOOKUP_SW_ERR, + FC_FLOWHASH_DEL_SW_ERR, + FC_FLUSH_QPC_ERR_USED, + FC_FLUSH_QPC_ERR_OUTER_LOCK, + + FC_AEQ_EVT_ERR_CODE_BUTT + +}; + +/* AEQ data structure */ +struct hifcoe_aqe_data_s { + union { + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 evt_code: 8; + u32 rsvd: 8; + u32 conn_id : 16; /* conn_id */ + #else + u32 conn_id : 16; + u32 rsvd: 8; + u32 evt_code: 8; + #endif + } wd0; + + u32 data0; + }; + + union { + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd: 12; + u32 xid : 20; /* xid */ + #else + u32 xid : 20; /* xid */ + u32 rsvd: 12; + #endif + } wd1; + + u32 data1; + }; +}; + +/* Control Section: Common Header */ +struct hifcoe_wqe_ctrl_ch_s { + union { + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 owner : 1; + u32 ctrl_sl : 2; + u32 csl : 2; + u32 dif_sl: 3; + u32 cr: 1; + u32 df: 1; + u32 va: 1; + u32 tsl : 5; + u32 cf: 1; + u32 wf: 1; + u32 rsvd0 : 4; + u32 drv_sl: 2; + u32 bdsl : 8; + #else + u32 bdsl : 8; + u32 drv_sl: 2; + u32 rsvd0 : 4; + u32 wf: 1; + u32 cf: 1; + u32 tsl : 5; + u32 va: 1; + u32 df: 1; + u32 cr: 1; + u32 dif_sl: 3; + u32 csl : 2; + u32 ctrl_sl: 2; + u32 owner : 1; + #endif + } wd0; + + u32 ctrl_ch_val; + }; + +}; + +/* Control Section: Queue Specific Field */ +struct hifcoe_wqe_ctrl_qsf_s { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 dump_wqe_sn : 
16; + u32 wqe_sn:16; + #else + u32 wqe_sn:16; + u32 dump_wqe_sn : 16; + #endif +}; + +/* DIF info definition in WQE */ +struct hifcoe_fc_dif_info_s { + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* difx enable flag:1'b0: disable;1'b1: enable */ + u32 difx_en : 1; + /* + * sector size:1'b0: sector size is 512B.1'b1: sector size is + * 4KB. + */ + u32 sct_size : 1; + u32 difx_len : 11; + /* + * The DIFX verify type: 2'b00: Type0, 2'b01: Type 1, 2'b10: + * Type 2, 2'b11: Type 3 + */ + u32 dif_verify_type : 2; + /* + * The DIFX insert and replace type: 2'b00: Type0, 2'b01: Type 1 + * , 2'b10: Type 2, 2'b11: Type 3 + */ + u32 dif_ins_rep_type : 2; + u32 difx_app_esc : 1; + u32 difx_ref_esc : 1; + u32 grd_ctrl : 3; + u32 grd_agm_ctrl : 2; + u32 grd_agm_ini_ctrl : 3; + u32 ref_tag_ctrl : 3; + u32 ref_tag_mode : 2; + #else + u32 ref_tag_mode : 2; + u32 ref_tag_ctrl : 3; + u32 grd_agm_ini_ctrl : 3; + u32 grd_agm_ctrl : 2; + u32 grd_ctrl : 3; + u32 difx_ref_esc : 1; + u32 difx_app_esc : 1; + u32 dif_ins_rep_type : 2; + u32 dif_verify_type : 2; + u32 difx_len : 11; + u32 sct_size : 1; + u32 difx_en : 1; + #endif + } wd0; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 app_tag_ctrl : 3; + u32 vpid : 7; + u32 lun_qos_en : 2; + u32 rsvd : 4; + u32 cmp_app_tag_msk : 16; + #else + u32 cmp_app_tag_msk : 16; + u32 rsvd : 4; + u32 lun_qos_en : 2; + u32 vpid : 7; + u32 app_tag_ctrl : 3; + #endif + } wd1; + + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u16 rep_app_tag; + u16 cmp_app_tag; + #else + u16 cmp_app_tag; + u16 rep_app_tag; + #endif + + u32 cmp_ref_tag; + u32 rep_ref_tag; + +}; + +/* Task Section: TMF SQE for INI */ +struct hifcoe_tmf_info_s { + union { + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 reset_exch_start :16; + u32 reset_exch_end :16; + #else + u32 reset_exch_end :16; + u32 reset_exch_start :16; + #endif + } bs; + u32 value; + } w0; + + union { + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd0 :5; + u32 marker_sts :1; + u32 reset_type :2; + u32 reset_did :24; + #else + u32 reset_did :24; + u32 reset_type :2; + u32 marker_sts :1; + u32 rsvd0 :5; + #endif + } bs; + u32 value; + } w1; + + union { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd0 :8; + u32 reset_sid :24; +#else + u32 reset_sid :24; + u32 rsvd0 :8; +#endif + } bs; + u32 value; + } w2; + + u8 reset_lun[8]; +}; + +/* Task Section: CMND SQE for INI */ +struct hifcoe_sqe_icmnd_s { + u8 fcp_cmnd_iu[48]; + union { + struct hifcoe_fc_dif_info_s dif_info; + struct hifcoe_tmf_info_s tmf; + } info; + + u32 magic_num; + u32 rsp_gpa_hi; + u32 rsp_gpa_lo; +}; + +/* Task Section: ABTS SQE */ +struct hifcoe_sqe_abts_s { + u32 fh_parm_abts; + u32 magic_num; +}; + +struct hifcoe_keys_s { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsv : 16; + u32 smac0 : 8; + u32 smac1 : 8; +#else + u32 smac1 : 8; + u32 smac0 : 8; + u32 rsv : 16; +#endif + } wd0; + + u8 smac[4]; + + u8 dmac[6]; + u8 sid[3]; + u8 did[3]; + + u32 svlan; + u32 cvlan; +}; + +/* BDSL: Session Enable WQE */ +/* keys field only use 26 bytes room */ +struct hifcoe_cmdqe_sess_en_s { + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 task_type : 8; + u32 rsvd0 : 8; + u32 rx_id : 16; + #else + u32 rx_id : 16; + u32 rsvd0 : 8; + u32 task_type : 8; + #endif + } wd0; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd1 : 12; + u32 cid : 20; + #else + u32 cid : 20; + u32 rsvd1 : 12; + #endif + } wd1; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 scqn :16; + u32 conn_id :16; + #else + u32 
conn_id :16; + u32 scqn :16; + #endif + } wd2; + + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd3 :12; + u32 xid_p :20; +#else + u32 xid_p :20; + u32 rsvd3 :12; +#endif + } wd3; + + u32 context_gpa_hi; + u32 context_gpa_lo; + struct hifcoe_keys_s keys; +}; + +/* Control Section */ +struct hifcoe_wqe_ctrl_s { + struct hifcoe_wqe_ctrl_ch_s ch; + struct hifcoe_wqe_ctrl_qsf_s qsf; +}; + +struct hifcoe_sqe_els_rsp_s { + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* + * ELS RSP packet payload. ELS RSP payload GPA is stored in BDSL, ucode + * uses child setup to send data(do not include fc_hdr/eth_hdr) + */ + u32 data_len:16; + u32 echo_flag :16; + #else + u32 echo_flag :16; + u32 data_len:16; + #endif + } wd0; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* Para Update: drv indicates whether Parent Context para needs to be + * updated or not. + * 00---no update + * 01---send PLOGI_ACC, need to update Port para + * 10---send PRLI_ACC, need to update process para + * 11---Reserved + */ + u32 para_update :2; + u32 clr_io :1; + u32 lp_bflag:1; /* used for loopback */ + u32 rsvd1 :28; + #else + u32 rsvd1 :28; + u32 lp_bflag:1; + u32 clr_io :1; + u32 para_update :2; + #endif + } wd1; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 tx_mfs :16; + u32 rsvd2 :14; + u32 e_d_tov :1; + u32 seq_cnt :1; + #else + u32 seq_cnt :1; + u32 e_d_tov :1; + u32 rsvd2 :14; + u32 tx_mfs :16; + #endif + } wd2; + + u32 e_d_tov_timer_val; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 immi_taskid_start:16; + u32 immi_taskid_cnt :13; + u32 xfer_dis:1; + u32 rec :1; + u32 conf:1; + #else + u32 conf:1; + u32 rec :1; + u32 xfer_dis:1; + u32 immi_taskid_cnt :13; + u32 immi_taskid_start:16; + #endif + } wd4; + + u32 first_burst_len; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 reset_exch_start :16; + u32 reset_exch_end:16; + #else + u32 reset_exch_end:16; + u32 reset_exch_start :16; + #endif + } wd6; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd:16; + u32 scqn:16; + #else + u32 scqn:16; + u32 rsvd:16; + #endif + } wd7; + + u32 magic_num; + u32 magic_local; + u32 magic_remote; + u32 ts_rcv_echo_req; +}; + +struct hifcoe_sqe_reset_session_s { + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 reset_exch_start :16; + u32 reset_exch_end:16; + #else + u32 reset_exch_end:16; + u32 reset_exch_start :16; + #endif + } wd0; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd:6; + /* + * 1: clean io; + * 2: delete session; + * 3: clean io&delete session + */ + u32 mode:2; + u32 reset_did :24; + #else + u32 reset_did :24; + u32 mode:2; + u32 rsvd:6; + #endif + } wd1; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd:8; + u32 reset_sid :24; + #else + u32 reset_sid :24; + u32 rsvd:8; + #endif + } wd2; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd:16; + u32 scqn:16; + #else + u32 scqn:16; + u32 rsvd:16; + #endif + } wd3; +}; + +struct hifcoe_sqe_t_els_gs_s { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* + * ELS/GS req packet payload. ELS/GS payload GPA is stored in BDSL, + * ucode uses child setup to send data(do not include fc_hdr/eth_hdr) + */ + u16 data_len; + u16 echo_flag; /* echo flag */ + #else + u16 echo_flag; + u16 data_len; + #endif + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* Para Update: drv indicates whether Parent Context para needs to be + * updated or not. 
+ * 00---no update + * 01---send PRLI Req, need to updata Port para + * 10---Reserved + * 11---Reserved + */ + u32 para_update :2; + u32 clr_io :1; + u32 lp_bflag:1; /* use for loopback */ + u32 rec_support :1; + u32 rec_flag:1; + u32 orign_oxid :16; + u32 rsvd1 :10; + #else + u32 rsvd1 :10; + u32 orign_oxid :16; + u32 rec_flag:1; + u32 rec_support :1; + u32 lp_bflag:1; + u32 clr_io :1; + u32 para_update :2; + #endif + } wd4; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 tx_mfs :16; + u32 rsvd2 :14; + u32 e_d_tov :1; + u32 seq_cnt :1; + #else + u32 seq_cnt :1; + u32 e_d_tov :1; + u32 rsvd2 :14; + u32 tx_mfs :16; + #endif + } wd5; + + u32 e_d_tov_timer_val; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 reset_exch_start :16; + u32 reset_exch_end:16; + #else + u32 reset_exch_end:16; + u32 reset_exch_start :16; + #endif + } wd6; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd:16; + u32 scqn:16; + #else + u32 scqn:16; + u32 rsvd:16; + #endif + } wd7; + + u32 magic_num; +}; + +struct hifcoe_sqe_els_gs_elsrsp_comm_s { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u16 data_len; + u16 rsvd; + #else + u16 rsvd; + u16 data_len; + #endif +}; + +/* SQE Task Section's Contents except Common Header */ +union hifcoe_sqe_ts_cont_u { + struct hifcoe_sqe_icmnd_s icmnd; + struct hifcoe_sqe_abts_s abts; + struct hifcoe_sqe_els_rsp_s els_rsp; + struct hifcoe_sqe_t_els_gs_s t_els_gs; + struct hifcoe_sqe_els_gs_elsrsp_comm_s els_gs_elsrsp_comm; + struct hifcoe_sqe_reset_session_s reset_session; + u32 value[16]; +}; + +struct hifcoe_sqe_ts_s { + /* SQE Task Section's Common Header */ + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 task_type :8; + u32 rsvd:5; /* used for loopback saving bdsl's num */ + /* cdb_type = 0:CDB_LEN = 16B, cdb_type = 1:CDB_LEN = 32B */ + u32 cdb_type:1; + /* standard immidiate data flag, use with local-xid for initiator */ + u32 immi_std:1; + /* + * CRC err inject flag: drv set, and ucode use for send first packet of + * WQE + */ + u32 crc_inj :1; + u32 local_xid :16; /* local exch_id */ + #else + u32 local_xid :16; + u32 crc_inj :1; + u32 immi_std:1; + /* cdb_type = 0:CDB_LEN = 16B, cdb_type = 1:CDB_LEN = 32B */ + u32 cdb_type:1; + u32 rsvd:5; /* used for loopback saving bdsl's num */ + u32 task_type :8; + #endif + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u16 remote_xid; /* remote exch_id */ + u16 conn_id; + #else + u16 conn_id; + u16 remote_xid; + #endif + } wd0; + + union hifcoe_sqe_ts_cont_u cont; +}; + +struct hifcoe_constant_sge_s { + u32 buf_addr_hi; + u32 buf_addr_lo; +}; + +struct hifcoe_variable_sge_s { + u32 buf_addr_hi; + u32 buf_addr_lo; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 r_flag :1; + u32 buf_len :31; + #else + u32 buf_len :31; + u32 r_flag :1; + #endif + } wd0; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 last_flag :1; + u32 extension_flag :1; + u32 xid : 14; + u32 buf_addr_gpa: 16; + #else + u32 buf_addr_gpa: 16; + u32 xid : 14; + u32 extension_flag :1; + u32 last_flag :1; + #endif + } wd1; +}; + +/* SQE, should not be over 128B */ +struct hifcoe_sqe_s { + struct hifcoe_wqe_ctrl_s ctrl_sl; + struct hifcoe_sqe_ts_s ts_sl; + struct hifcoe_variable_sge_s sge[2]; +}; + +struct hifcoe_rqe_ctrl_s { + struct hifcoe_wqe_ctrl_ch_s ch; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u16 dump_wqe_msn; + u16 wqe_msn; + #else + u16 wqe_msn; + u16 dump_wqe_msn; + #endif + } wd0; +}; + +struct hifcoe_rqe_drv_s { + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* + * User ID[15:0], 
15 bits valid and User ID[15] is fix to 0 + */ + u32 user_id :16; + u32 rsvd0 :16; + #else + u32 rsvd0 :16; + u32 user_id :16; + #endif + } wd0; + + u32 rsvd1; +}; + +/* RQE,should not be over 32B */ +struct hifcoe_rqe_s { + struct hifcoe_rqe_ctrl_s ctrl_sl; + u32 cqe_gpa_h; + u32 cqe_gpa_l; + struct hifcoe_constant_sge_s bds_sl; + struct hifcoe_rqe_drv_s drv_sl; +}; + +struct hifcoe_cmdqe_abts_rsp_s { + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 task_type : 8; + u32 rsvd0 : 8; + u32 rx_id : 16; + #else + u32 rx_id : 16; + u32 rsvd0 : 8; + u32 task_type : 8; + #endif + } wd0; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsp_type:1; /* 0:BA_ACC, 1:BA_RJT */ + u32 payload_len :7; + u32 port_id :4; + u32 rsvd1 :4; + u32 ox_id :16; + #else + u32 ox_id :16; + u32 rsvd1 :4; + u32 port_id :4; + u32 payload_len :7; + u32 rsp_type:1; + #endif + } wd1; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 scqn: 16; + u32 conn_id : 16; + #else + u32 conn_id : 16; + u32 scqn: 16; + #endif + } wd2; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd: 12; + u32 xid : 20; + #else + u32 xid : 20; + u32 rsvd: 12; + #endif + } wd3; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd: 12; + u32 cid : 20; + #else + u32 cid : 20; + u32 rsvd: 12; + #endif + } wd4; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd: 16; + u32 req_rx_id : 16; + #else + u32 req_rx_id : 16; + u32 rsvd: 16; + #endif + } wd5; + + /* payload length is according to rsp_type:1DWORD or 3DWORD */ + u32 payload[3]; +}; + +struct hifcoe_cmdqe_buffer_clear_s { + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 wqe_type:8; + u32 rsvd0 :8; + u32 rsvd1 :16; + #else + u32 rsvd1 :16; + u32 rsvd0 :8; + u32 wqe_type:8; + #endif + } wd0; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rx_id_start :16; + u32 rx_id_end :16; + #else + u32 rx_id_end :16; + u32 rx_id_start :16; + #endif + } wd1; + + u32 scqn; + u32 wd3; +}; + +struct hifcoe_cmdqe_flush_sq_info_s { + u32 cid; + u32 xid; +}; + +struct hifcoe_cmdqe_flush_sq_s { + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 wqe_type :8; + u32 sq_qid :8; + u32 entry_count :16; + #else + u32 entry_count :16; + u32 sq_qid :8; + u32 wqe_type :8; + #endif + } wd0; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 last_wqe:1; + u32 pos :11; + u32 port_id:4; + u32 scqn:16; + #else + u32 scqn:16; + u32 port_id :4; + u32 pos :11; + u32 last_wqe:1; + #endif + } wd1; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 pkt_ptr :16; + u32 rsvd:16; + #else + u32 rsvd:16; + u32 pkt_ptr :16; + #endif + } wd2; + + struct hifcoe_cmdqe_flush_sq_info_s sq_info_entry[0]; +}; + +struct hifcoe_cmdqe_creat_srqc_s { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 task_type : 8; + u32 rsvd0 : 8; + u32 rsvd1 : 16; +#else + u32 rsvd1 : 16; + u32 rsvd0 : 8; + u32 task_type : 8; +#endif + } wd0; + + u32 srqc_gpa_h; + u32 srqc_gpa_l; + + u32 srqc[16];/* srqc_size=64B */ + +}; + +struct hifcoe_cmdqe_delete_srqc_s { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 task_type : 8; + u32 rsvd0 : 8; + u32 rsvd1 : 16; +#else + u32 rsvd1 : 16; + u32 rsvd0 : 8; + u32 task_type : 8; +#endif + } wd0; + + u32 srqc_gpa_h; + u32 srqc_gpa_l; +}; + +struct hifcoe_cmdqe_clr_srq_s { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 task_type : 8; + u32 rsvd0 : 8; + u32 rsvd1 : 16; +#else + u32 rsvd1 : 16; + u32 rsvd0 : 8; + u32 task_type : 8; +#endif + } wd0; + + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) 
+ /* + * 0: SRQ for recv ELS; + * 1: SRQ for recv immidiate data + */ + u32 srq_type: 16; + u32 scqn: 16; +#else + u32 scqn: 16; + u32 srq_type: 16; +#endif + } wd1; + + u32 srqc_gpa_h; + u32 srqc_gpa_l; +}; + +struct hifcoe_cmdqe_creat_scqc_s { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 task_type : 8; + u32 rsvd0 : 8; + u32 rsvd1 : 16; +#else + u32 rsvd1 : 16; + u32 rsvd0 : 8; + u32 task_type : 8; +#endif + } wd0; + + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd2 : 16; + u32 scqn: 16; +#else + u32 scqn: 16; + u32 rsvd2 : 16; +#endif + } wd1; + + u32 scqc[16];/* scqc_size=64B */ + +}; + +struct hifcoe_cmdqe_delete_scqc_s { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 task_type : 8; + u32 rsvd0 : 8; + u32 rsvd1 : 16; +#else + u32 rsvd1 : 16; + u32 rsvd0 : 8; + u32 task_type : 8; +#endif + } wd0; + + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd2 : 16; + u32 scqn: 16; +#else + u32 scqn: 16; + u32 rsvd2 : 16; +#endif + } wd1; +}; + +struct hifcoe_sqe_t_rsp_s { + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 fill:2; /* 2bit of F_CTL[1:0] */ + u32 conf:1; /* Wait INI confirm, 0: disable, 1:enable */ + /* + * 0: payload area store payload, + * 1: payload area store payload GPA + */ + u32 mode:1; + u32 immi:1; + u32 rsvd0 :3; + u32 fcp_rsp_len :8; /* FCP_RESP payload(24~96B)*/ + u32 rsvd1 :16; + #else + u32 rsvd1 :16; + u32 fcp_rsp_len :8; + u32 rsvd0 :3; + u32 immi:1; + u32 mode:1; + u32 conf:1; + u32 fill:2; + #endif + } wd0; + + u32 magic_num; + u32 hotpooltag; + + union { + struct { + u32 addr_h; + u32 addr_l; + } gpa; + + struct { + u32 data[25]; /* FCP_RESP payload buf, 100B rsvd */ + } buf; + + } payload; + +}; + +struct hifcoe_sqe_tresp_ts_s { + /* SQE Task Section's Common Header */ + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u8 task_type; + u8 rsvd0; + u16 local_xid; + #else + u16 local_xid; + u8 rsvd0; + u8 task_type; + #endif + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u16 remote_xid; + u16 conn_id; + #else + u16 conn_id; + u16 remote_xid; + #endif + } wd0; + + struct hifcoe_sqe_t_rsp_s t_rsp; +}; + +/* SQE for fcp response, max TSL is 120B*/ +struct hifcoe_sqe_tresp_s { + struct hifcoe_wqe_ctrl_s ctrl_sl; + struct hifcoe_sqe_tresp_ts_s ts_sl; +}; + +/* SCQE Common Header */ +struct hifcoe_scqe_ch_s { + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 owner : 1; + u32 err_code: 7; + u32 cqe_remain_cnt : 3; + u32 rsvd0 : 13; + u32 task_type : 8; + #else + u32 task_type : 8; + u32 rsvd0 : 13; + u32 cqe_remain_cnt : 3; + u32 err_code: 7; + u32 owner : 1; + #endif + } wd0; +}; + +struct hifcoe_scqe_type_s { + struct hifcoe_scqe_ch_s ch; + + u32 rsvd0; + +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u16 rsvd4; + u16 conn_id; +#else + u16 conn_id; + u16 rsvd4; +#endif + + u32 rsvd1[12]; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd2 :31; + u32 done:1; + #else + u32 done:1; + u32 rsvd3 :31; + #endif + } wd0; +}; + +struct hifcoe_scqe_sess_sts_s { + struct hifcoe_scqe_ch_s ch; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd1 :12; + u32 xid_qpn :20; + #else + u32 xid_qpn :20; + u32 rsvd1 :12; + #endif + } wd0; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd3 :16; + u32 conn_id :16; + #else + u32 conn_id :16; + u32 rsvd3 :16; + #endif + } wd1; + + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd2 :12; + u32 cid :20; +#else + u32 cid :20; + u32 rsvd2 :12; +#endif + } wd2; + + u64 bloomfilter_id; /* valid only in session offload */ + +}; + +struct 
hifcoe_scqe_comm_rsp_sts_s { + struct hifcoe_scqe_ch_s ch; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 ox_id :16; + u32 rx_id :16; + #else + u32 rx_id :16; + u32 ox_id :16; + #endif + } wd0; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd0 :16; + u32 conn_id :16; + #else + u32 conn_id :16; + u32 rsvd0 :16; + #endif + } wd1; + + u32 magic_num; +}; + +struct hifcoe_scqe_iresp_s { + struct hifcoe_scqe_ch_s ch; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 ox_id :16; + u32 rx_id :16; + #else + u32 rx_id :16; + u32 ox_id :16; + #endif + } wd0; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 dif_info:5; + u32 rsvd0 :11; + u32 conn_id :16; + #else + u32 conn_id :16; + u32 rsvd0 :11; + u32 dif_info:5; + #endif + } wd1; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd0 :16; + u32 fcp_flag:8; + u32 scsi_status :8; + #else + u32 scsi_status :8; + u32 fcp_flag:8; + u32 rsvd0 :16; + #endif + } wd2; + + u32 fcp_resid; + u32 fcp_sns_len; + u32 fcp_rsp_len; + u32 magic_num; +}; + +struct hifcoe_scqe_rcv_abts_rsp_s { + struct hifcoe_scqe_ch_s ch; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 ox_id :16; + u32 rx_id :16; + #else + u32 rx_id :16; + u32 ox_id :16; + #endif + } wd0; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd0 :16; + u32 conn_id :16; + #else + u32 conn_id :16; + u32 rsvd0 :16; + #endif + } wd1; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd0:24; + u32 fh_rctrl :8; + #else + u32 fh_rctrl :8; + u32 rsvd0:24; + #endif + } wd2; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd1 :8; + u32 did :24; + #else + u32 did :24; + u32 rsvd1 :8; + #endif + } wd3; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd2 :8; + u32 sid :24; + #else + u32 sid :24; + u32 rsvd2 :8; + #endif + } wd4; + + /* payload length is according to fh_rctrl:1DWORD or 3DWORD */ + u32 payload[3]; + u32 magic_num; + +}; + +struct hifcoe_scqe_rcv_els_cmd_s { + struct hifcoe_scqe_ch_s ch; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd0 :8; + u32 did :24; + #else + u32 did :24; + u32 rsvd0 :8; + #endif + } wd0; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd1 :8; + u32 sid :24; + #else + u32 sid :24; + u32 rsvd1 :8; + #endif + } wd1; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 ox_id :16; + u32 rx_id :16; + #else + u32 rx_id :16; + u32 ox_id :16; + #endif + } wd2; + + struct{ + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 data_len :16;/* ELS cmd Payload length */ + u32 user_id_num :16;/* current used user_id num */ + #else + u32 user_id_num :16; + u32 data_len :16; + #endif + } wd3; + + u32 user_id[9]; /* User ID of SRQ SGE, used for drvier buffer release */ + u32 ts; +}; + +struct hifcoe_scqe_rcv_els_gs_rsp_s { + struct hifcoe_scqe_ch_s ch; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 ox_id :16; + u32 rx_id :16; + #else + u32 rx_id :16; + u32 ox_id :16; + #endif + } wd1; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 data_len:16; + u32 conn_id :16; + #else + u32 conn_id :16; + u32 data_len:16; /* ELS/GS RSP Payload length */ + #endif + } wd2; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 end_rsp :1; + u32 echo_rsp:1; + u32 rsvd:6; + u32 did :24; + #else + u32 did :24; + u32 rsvd:6; + u32 echo_rsp:1; + u32 end_rsp :1; + #endif + } wd3; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 user_id_num :8; + u32 sid :24; + #else + u32 sid :24; + u32 user_id_num :8; + #endif + } wd4; + + 
u32 magic_num; + u32 user_id[9]; +}; + +struct hifcoe_scqe_rcv_flush_sts_s { + struct hifcoe_scqe_ch_s ch; + + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 last_flush : 8; + u32 port_id: 8; + u32 rsvd0 : 16; +#else + u32 rsvd0 : 16; + u32 port_id: 8; + u32 last_flush : 8; +#endif + } wd0; +}; + +struct hifcoe_scqe_rcv_clear_buf_sts_s { + struct hifcoe_scqe_ch_s ch; + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 port_id: 8; + u32 rsvd0 : 24; +#else + u32 rsvd0 : 24; + u32 port_id: 8; +#endif + } wd0; +}; + +struct hifcoe_scqe_itmf_marker_sts_s { + struct hifcoe_scqe_ch_s ch; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 ox_id :16; + u32 rx_id :16; + #else + u32 rx_id :16; + u32 ox_id :16; + #endif + } wd1; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 end_rsp :8; + u32 did :24; + #else + u32 did :24; + u32 end_rsp :8; + #endif + } wd2; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd1:8; + u32 sid :24; + #else + u32 sid :24; + u32 rsvd1:8; + #endif + } wd3; +}; + +struct hifcoe_scqe_abts_marker_sts_s { + struct hifcoe_scqe_ch_s ch; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 ox_id :16; + u32 rx_id :16; + #else + u32 rx_id :16; + u32 ox_id :16; + #endif + } wd1; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 end_rsp :8; + u32 did :24; + #else + u32 did :24; + u32 end_rsp :8; + #endif + } wd2; + + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 io_state :8; + u32 sid :24; + #else + u32 sid :24; + u32 io_state :8; + #endif + } wd3; +}; + +/* SCQE, should not be over 64B */ +union hifcoe_scqe_u { + struct hifcoe_scqe_type_s common; + /* session enable/disable/delete sts */ + struct hifcoe_scqe_sess_sts_s sess_sts; + /* aborts/abts_rsp/els rsp sts */ + struct hifcoe_scqe_comm_rsp_sts_s comm_sts; + struct hifcoe_scqe_rcv_clear_buf_sts_s clear_sts;/* clear buffer sts */ + struct hifcoe_scqe_rcv_flush_sts_s flush_sts; /* flush sq sts */ + struct hifcoe_scqe_iresp_s iresp; + struct hifcoe_scqe_rcv_abts_rsp_s rcv_abts_rsp; /* recv abts rsp*/ + struct hifcoe_scqe_rcv_els_cmd_s rcv_els_cmd;/* recv els cmd */ + struct hifcoe_scqe_rcv_els_gs_rsp_s rcv_els_gs_rsp;/* recv els/gs rsp */ + struct hifcoe_scqe_itmf_marker_sts_s itmf_marker_sts;/* tmf marker */ + struct hifcoe_scqe_abts_marker_sts_s abts_marker_sts;/* abts marker */ +}; + +struct hifcoe_cmdqe_type_s { + struct { + #if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 task_type : 8; + u32 rsvd0 : 8; + u32 rx_id : 16; + #else + u32 rx_id : 16; + u32 rsvd0 : 8; + u32 task_type : 8; + #endif + } wd0; +}; + +/* CMDQE, variable length */ +union hifc_cmdqe_u { + struct hifcoe_cmdqe_type_s common; + struct hifcoe_cmdqe_sess_en_s session_enable; + struct hifcoe_cmdqe_abts_rsp_s snd_abts_rsp; + struct hifcoe_cmdqe_buffer_clear_s buffer_clear; + struct hifcoe_cmdqe_flush_sq_s flush_sq; + struct hifcoe_cmdqe_creat_srqc_s create_srqc; + struct hifcoe_cmdqe_delete_srqc_s delete_srqc; + struct hifcoe_cmdqe_clr_srq_s clear_srq; + struct hifcoe_cmdqe_creat_scqc_s create_scqc; + struct hifcoe_cmdqe_delete_scqc_s delete_scqc; +}; + +#endif diff --git a/drivers/scsi/huawei/hifc/unf_io.c b/drivers/scsi/huawei/hifc/unf_io.c new file mode 100644 index 000000000000..f52b9927d7de --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_io.c @@ -0,0 +1,1338 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#include "hifc_knl_adp.h" +#include "unf_log.h" +#include "unf_exchg.h" +#include 
"unf_rport.h" +#include "unf_io.h" +#include "unf_portman.h" +#include "unf_io_abnormal.h" + +#define UNF_GET_FCP_CTL(pkg) ((((pkg)->status) >> 8) & 0xFF) +#define UNF_GET_SCSI_STATUS(pkg) (((pkg)->status) & 0xFF) + +static unsigned int unf_io_success_handler(struct unf_xchg_s *v_xchg, + struct unf_frame_pkg_s *v_pkg, + unsigned int v_status); +static unsigned int unf_ini_error_default_handler(struct unf_xchg_s *v_xchg, + struct unf_frame_pkg_s *v_pkg, + unsigned int v_status); +static unsigned int unf_io_under_flow_handler(struct unf_xchg_s *v_xchg, + struct unf_frame_pkg_s *v_pkg, + unsigned int v_status); +static unsigned int unf_ini_dif_error_handler(struct unf_xchg_s *v_xchg, + struct unf_frame_pkg_s *v_pkg, + unsigned int v_status); + +struct unf_ini_error_handler { + unsigned int error_code; + unsigned int (*pfn_unf_ini_error_handler)(struct unf_xchg_s *v_xchg, + struct unf_frame_pkg_s *v_pkg, + unsigned int v_status); +}; + +struct unf_ini_error_handler ini_error_handler_table[] = { + { UNF_IO_SUCCESS, unf_io_success_handler }, + { UNF_IO_ABORTED, unf_ini_error_default_handler }, + { UNF_IO_FAILED, unf_ini_error_default_handler }, + { UNF_IO_ABORT_ABTS, unf_ini_error_default_handler }, + { UNF_IO_ABORT_LOGIN, unf_ini_error_default_handler }, + { UNF_IO_ABORT_REET, unf_ini_error_default_handler }, + { UNF_IO_ABORT_FAILED, unf_ini_error_default_handler }, + { UNF_IO_OUTOF_ORDER, unf_ini_error_default_handler }, + { UNF_IO_FTO, unf_ini_error_default_handler }, + { UNF_IO_LINK_FAILURE, unf_ini_error_default_handler }, + { UNF_IO_OVER_FLOW, unf_ini_error_default_handler }, + { UNF_IO_RSP_OVER, unf_ini_error_default_handler }, + { UNF_IO_LOST_FRAME, unf_ini_error_default_handler }, + { UNF_IO_UNDER_FLOW, unf_io_under_flow_handler }, + { UNF_IO_HOST_PROG_ERROR, unf_ini_error_default_handler }, + { UNF_IO_SEST_PROG_ERROR, unf_ini_error_default_handler }, + { UNF_IO_INVALID_ENTRY, unf_ini_error_default_handler }, + { UNF_IO_ABORT_SEQ_NOT, unf_ini_error_default_handler }, + { UNF_IO_REJECT, unf_ini_error_default_handler }, + { UNF_IO_EDC_IN_ERROR, unf_ini_error_default_handler }, + { UNF_IO_EDC_OUT_ERROR, unf_ini_error_default_handler }, + { UNF_IO_UNINIT_KEK_ERR, unf_ini_error_default_handler }, + { UNF_IO_DEK_OUTOF_RANGE, unf_ini_error_default_handler }, + { UNF_IO_KEY_UNWRAP_ERR, unf_ini_error_default_handler }, + { UNF_IO_KEY_TAG_ERR, unf_ini_error_default_handler }, + { UNF_IO_KEY_ECC_ERR, unf_ini_error_default_handler }, + { UNF_IO_BLOCK_SIZE_ERROR, unf_ini_error_default_handler }, + { UNF_IO_ILLEGAL_CIPHER_MODE, unf_ini_error_default_handler }, + { UNF_IO_CLEAN_UP, unf_ini_error_default_handler }, + { UNF_IO_ABORTED_BY_TARGET, unf_ini_error_default_handler }, + { UNF_IO_TRANSPORT_ERROR, unf_ini_error_default_handler }, + { UNF_IO_LINK_FLASH, unf_ini_error_default_handler }, + { UNF_IO_TIMEOUT, unf_ini_error_default_handler }, + { UNF_IO_DMA_ERROR, unf_ini_error_default_handler }, + { UNF_IO_DIF_ERROR, unf_ini_dif_error_handler }, + { UNF_IO_INCOMPLETE, unf_ini_error_default_handler }, + { UNF_IO_DIF_REF_ERROR, unf_ini_dif_error_handler }, + { UNF_IO_DIF_GEN_ERROR, unf_ini_dif_error_handler } +}; + +void unf_done_ini_xchg(struct unf_xchg_s *v_xchg) +{ + /* + * About I/O Done + * 1. normal case + * 2. Send ABTS & RCVD RSP + * 3. 
Send ABTS & timer timeout + */ + struct unf_scsi_cmd_s scsi_cmd = { 0 }; + unsigned long flags = 0; + struct unf_scsi_cmd_info_s *scsi_cmnd_info = NULL; + struct unf_rport_scsi_id_image_s *scsi_image_table = NULL; + unsigned int scsi_id = 0; + + UNF_CHECK_VALID(0x1301, TRUE, v_xchg, return); + + /* scsi_cmnd validity check */ + if (unlikely(!v_xchg->scsi_cmnd_info.scsi_cmnd)) + return; + + /* 1. Free RX_ID for INI SIRT: Do not care + * 2. set & check exchange state + * + * for Set UP_ABORT Tag: + * 1) L_Port destroy + * 2) AC power down + * 3) LUN reset + * 4) Target/Session reset + * 5) SCSI send Abort(ABTS) + */ + spin_lock_irqsave(&v_xchg->xchg_state_lock, flags); + v_xchg->io_state |= INI_IO_STATE_DONE; + if (unlikely(v_xchg->io_state & (INI_IO_STATE_UPABORT | + INI_IO_STATE_UPSEND_ERR | + INI_IO_STATE_TMF_ABORT))) { + /* + * a. UPABORT: scsi have send ABTS + * --->>> do not call SCSI_Done, return directly + * b. UPSEND_ERR: error happened duiring LLDD send SCSI_CMD + * --->>> do not call SCSI_Done, scsi need retry + */ + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_KEVENT, + "[event]Exchange(0x%p) Cmdsn:0x%lx upCmd:%p oxid(0x%x) with state(0x%x) has been aborted or send error", + v_xchg, (unsigned long)v_xchg->cmnd_sn, + v_xchg->scsi_cmnd_info.scsi_cmnd, v_xchg->ox_id, + v_xchg->io_state); + + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags); + /* here, return directly */ + return; + } + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags); + + /* 3. Get scsi_cmnd info */ + scsi_cmnd_info = &v_xchg->scsi_cmnd_info; + + /* + * 4. Set: + * scsi_cmnd; + * cmnd_done_func; + * cmnd up_level_done; + * sense_buff_addr; + * resid_length; + * cmnd_result; + * dif_info + * + * UNF_SCSI_CMND <<-- UNF_SCSI_CMND_INFO + */ + UNF_SET_HOST_CMND((&scsi_cmd), scsi_cmnd_info->scsi_cmnd); + UNF_SET_CMND_DONE_FUNC((&scsi_cmd), scsi_cmnd_info->pfn_done); + scsi_cmd.drv_private = v_xchg->lport; + if (unlikely((UNF_SCSI_STATUS(v_xchg->scsi_cmnd_info.result)) & + FCP_SNS_LEN_VALID_MASK)) { + unf_save_sense_data( + scsi_cmd.upper_cmnd, + (char *)v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu, + SCSI_SENSE_DATA_LEN); + } + UNF_SET_RESID((&scsi_cmd), (unsigned int)v_xchg->resid_len); + UNF_SET_CMND_RESULT((&scsi_cmd), scsi_cmnd_info->result); + memcpy(&scsi_cmd.dif_info, &v_xchg->dif_info, + sizeof(struct dif_info_s)); + + scsi_id = scsi_cmnd_info->scsi_id; + + /* 5. call scsi_cmnd_done func: unf_scsi_done */ + UNF_DONE_SCSI_CMND(&scsi_cmd); + + /* 6. 
Update IO result CNT */ + if (likely(v_xchg->lport)) { + scsi_image_table = &v_xchg->lport->rport_scsi_table; + UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, + (scsi_cmnd_info->result >> 16)); + } +} + +static inline unsigned int unf_ini_get_sgl_entry_buf( + ini_get_sgl_entry_buf pfn_unf_ini_get_sgl, + void *v_cmnd, + void *v_driver_sgl, + void **v_upper_sgl, + unsigned int *v_req_index, + unsigned int *v_index, + char **v_buf, + unsigned int *v_buf_len) +{ + if (unlikely(!pfn_unf_ini_get_sgl)) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "Command(0x%p) Get sgl Entry func Null.", v_cmnd); + + return UNF_RETURN_ERROR; + } + + return pfn_unf_ini_get_sgl(v_cmnd, v_driver_sgl, v_upper_sgl, + v_req_index, v_index, v_buf, v_buf_len); +} + +unsigned int unf_ini_get_sgl_entry(void *v_pkg, char **v_buf, + unsigned int *v_buf_len) +{ + struct unf_frame_pkg_s *pkg = (struct unf_frame_pkg_s *)v_pkg; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x1305, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x1306, UNF_TRUE, v_buf, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x1307, UNF_TRUE, v_buf_len, return UNF_RETURN_ERROR); + + xchg = (struct unf_xchg_s *)pkg->xchg_contex; + UNF_CHECK_VALID(0x1308, UNF_TRUE, xchg, return UNF_RETURN_ERROR); + + /* Get SGL Entry buffer for INI Mode */ + ret = unf_ini_get_sgl_entry_buf( + xchg->scsi_cmnd_info.pfn_unf_get_sgl_entry_buf, + xchg->scsi_cmnd_info.scsi_cmnd, + NULL, + &xchg->req_sgl_info.sgl, + &xchg->scsi_cmnd_info.port_id, + &((xchg->req_sgl_info).entry_index), + v_buf, v_buf_len); + + return ret; +} + +unsigned int unf_ini_get_dif_sgl_entry(void *v_pkg, char **v_buf, + unsigned int *v_buf_len) +{ + struct unf_frame_pkg_s *pkg = (struct unf_frame_pkg_s *)v_pkg; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x1305, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x1306, UNF_TRUE, v_buf, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x1307, UNF_TRUE, v_buf_len, return UNF_RETURN_ERROR); + + xchg = (struct unf_xchg_s *)pkg->xchg_contex; + UNF_CHECK_VALID(0x1308, UNF_TRUE, xchg, return UNF_RETURN_ERROR); + + /* Get SGL Entry buffer for INI Mode */ + ret = unf_ini_get_sgl_entry_buf( + xchg->scsi_cmnd_info.pfn_unf_get_sgl_entry_buf, + xchg->scsi_cmnd_info.scsi_cmnd, + NULL, + &xchg->dif_sgl_info.sgl, + &xchg->scsi_cmnd_info.port_id, + &xchg->dif_sgl_info.entry_index, + v_buf, v_buf_len); + return ret; +} + +unsigned int unf_get_uplevel_cmnd_errcode( + struct unf_ini_error_code_s *v_err_table, + unsigned int v_err_table_count, + unsigned int v_drv_err_code) +{ + unsigned int i; + + /* fail return UNF_RETURN_ERROR,adjust by up level */ + if (unlikely(!v_err_table)) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "Error Code Table is Null, Error Code(0x%x).", + v_drv_err_code); + + return (unsigned int)UNF_SCSI_HOST(DID_ERROR); + } + + for (i = 0; i < v_err_table_count; i++) { + if (v_drv_err_code == v_err_table[i].drv_err_code) + return v_err_table[i].ap_err_code; + } + + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Unsupported Ap Error code by Error Code(0x%x).", + v_drv_err_code); + + return (unsigned int)UNF_SCSI_HOST(DID_ERROR); +} + +static unsigned int unf_ini_status_handle(struct unf_xchg_s *v_xchg, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned int i; + unsigned int ret; + unsigned int status; + + for (i = 0; + i < sizeof(ini_error_handler_table) / + sizeof(struct unf_ini_error_handler); + i++) { + if 
(UNF_GET_LL_ERR(v_pkg) == + ini_error_handler_table[i].error_code) { + status = unf_get_uplevel_cmnd_errcode( + v_xchg->scsi_cmnd_info.err_code_table, + v_xchg->scsi_cmnd_info.err_code_table_cout, + UNF_GET_LL_ERR(v_pkg)); + + if (ini_error_handler_table[i].pfn_unf_ini_error_handler) { + ret = ini_error_handler_table[i].pfn_unf_ini_error_handler( + v_xchg, + v_pkg, + status); + } else { + /* set exchange->result + * ---to--->>>scsi_result + */ + ret = unf_ini_error_default_handler(v_xchg, + v_pkg, + status); + } + + return ret; + } + } + + status = unf_get_uplevel_cmnd_errcode( + v_xchg->scsi_cmnd_info.err_code_table, + v_xchg->scsi_cmnd_info.err_code_table_cout, + UNF_IO_SOFT_ERR); + + ret = unf_ini_error_default_handler(v_xchg, v_pkg, status); + + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Can not find com status, SID(0x%x) exchange(0x%p) com_status(0x%x) DID(0x%x) hot_pool_tag(0x%x)", + v_xchg->sid, v_xchg, v_pkg->status, + v_xchg->did, v_xchg->hot_pool_tag); + + return ret; +} + +static void unf_analysis_response_info(struct unf_xchg_s *v_xchg, + struct unf_frame_pkg_s *v_pkg, + unsigned int *v_status) +{ + unsigned char *resp_buf = NULL; + + /* LL_Driver uses Little End, and copies RSP_INFO to COM_Driver */ + if (v_pkg->unf_rsp_pload_bl.buffer_ptr) { + if (v_pkg->unf_rsp_pload_bl.buffer_ptr[0] != + UNF_FCP_TM_RSP_COMPLETE) { + *v_status = UNF_SCSI_HOST(DID_BUS_BUSY); + + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%p) DID bus busy, scsi_status(0x%x)", + v_xchg->lport, UNF_GET_SCSI_STATUS(v_pkg)); + } + } else { + resp_buf = + (unsigned char *)v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu; + if ((resp_buf)) { + /* If chip uses Little End, then change it to Big End */ + if ((v_pkg->byte_orders & UNF_BIT_3) == 0) + unf_cpu_to_big_end( + resp_buf, + v_pkg->unf_rsp_pload_bl.length); + + /* Chip DMA data with Big End */ + if (resp_buf[3] != UNF_FCP_TM_RSP_COMPLETE) { + *v_status = UNF_SCSI_HOST(DID_BUS_BUSY); + + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, + UNF_WARN, + "[warn]Port(0x%p) DID bus busy, scsi_status(0x%x)", + v_xchg->lport, + UNF_GET_SCSI_STATUS(v_pkg)); + } + } + } +} + +static void unf_analysis_sense_info(struct unf_xchg_s *v_xchg, + struct unf_frame_pkg_s *v_pkg) +{ +#define MIN(x, y) ((x) < (y) ? 
(x) : (y)) + + unsigned int length = 0; + + /* 4 bytes Align */ + length = v_pkg->unf_sense_pload_bl.length; + if (length % 4 != 0) + length = 4 * ((length / 4) + 1); + + /* + * If have sense info then copy directly + * else, the chip has been dma the data to sense buffer + */ + if (v_pkg->unf_sense_pload_bl.buffer_ptr) { + /* carry from wqe by ll_driver & ucode: do not used */ + unf_cpu_to_big_end(v_pkg->unf_sense_pload_bl.buffer_ptr, + length); + + memcpy(v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu, + v_pkg->unf_sense_pload_bl.buffer_ptr, + (unsigned int)MIN(UNF_SCSI_SENSE_DATA_LEN, + v_pkg->unf_sense_pload_bl.length)); + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]LPort(0x%p), Sense Length(%u), Scsi Status(0x%x).", + v_xchg->lport, + v_pkg->unf_sense_pload_bl.length, + UNF_GET_SCSI_STATUS(v_pkg)); + } else if ((length != 0) && + (v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu)) { + /* has been dma to exchange buffer */ + if ((v_pkg->byte_orders & UNF_BIT_4) == 0) { + unf_cpu_to_big_end(((unsigned char *) + (v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu)) + + v_pkg->unf_rsp_pload_bl.length, + v_pkg->unf_sense_pload_bl.length); + } + + memcpy(v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu, + ((unsigned char *) + (v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu)) + + v_pkg->unf_rsp_pload_bl.length, + (unsigned int)MIN(UNF_SCSI_SENSE_DATA_LEN, + v_pkg->unf_sense_pload_bl.length)); + } +} + +static unsigned int unf_io_success_handler(struct unf_xchg_s *v_xchg, + struct unf_frame_pkg_s *v_pkg, + unsigned int v_status) +{ + unsigned char scsi_status; + unsigned char control; + unsigned int status = v_status; + + UNF_CHECK_VALID(0x1311, TRUE, v_xchg, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x1312, TRUE, v_pkg, return UNF_RETURN_ERROR); + + control = UNF_GET_FCP_CTL(v_pkg); + scsi_status = UNF_GET_SCSI_STATUS(v_pkg); + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_INFO, + "[info]Port(0x%p), Exchange(0x%p) Completed, Control(0x%x), Scsi Status(0x%x)", + v_xchg->lport, v_xchg, control, scsi_status); + + if (control & FCP_SNS_LEN_VALID_MASK) { + /* has sense info */ + if (scsi_status == FCP_SCSI_STATUS_GOOD) + scsi_status = SCSI_CHECK_CONDITION; + + unf_analysis_sense_info(v_xchg, v_pkg); + } else { + /* + * When the FCP_RSP_LEN_VALID bit is set to one, + * the content of the SCSI STATUS CODE field is not reliable + * and shall be ignored by the application client. 
+ */ + if (control & FCP_RSP_LEN_VALID_MASK) + unf_analysis_response_info(v_xchg, v_pkg, &status); + } + + v_xchg->scsi_cmnd_info.result = status | + UNF_SCSI_STATUS(scsi_status); + + return RETURN_OK; +} + +static unsigned int unf_ini_error_default_handler(struct unf_xchg_s *v_xchg, + struct unf_frame_pkg_s *v_pkg, + unsigned int v_status) +{ + /* set exchange->result ---to--->>> scsi_cmnd->result */ + UNF_CHECK_VALID(0x1313, TRUE, v_xchg, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x1314, TRUE, v_pkg, return UNF_RETURN_ERROR); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_ABNORMAL, UNF_WARN, + "[warn]SID(0x%x) exchange(0x%p) com_status(0x%x) up_status(0x%x) DID(0x%x) hot_pool_tag(0x%x) response_len(0x%x)", + v_xchg->sid, v_xchg, v_pkg->status, v_status, + v_xchg->did, v_xchg->hot_pool_tag, v_pkg->residus_len); + + v_xchg->scsi_cmnd_info.result = + v_status | UNF_SCSI_STATUS(UNF_GET_SCSI_STATUS(v_pkg)); + + return RETURN_OK; +} + +static unsigned int unf_ini_dif_error_handler(struct unf_xchg_s *v_xchg, + struct unf_frame_pkg_s *v_pkg, + unsigned int v_status) +{ + struct unf_dif_control_info_s *dif_control = NULL; + unsigned char *sense_data = NULL; + unsigned short sense_code = 0; + + UNF_CHECK_VALID(0x1315, TRUE, v_xchg, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x1316, TRUE, v_pkg, return UNF_RETURN_ERROR); + UNF_REFERNCE_VAR(v_status); + + /* + * According to the DIF scheme, + * the driver sets check condition(0x2) when a DIF error occurs, + * and returns the values based on the upper-layer verification result. + * Check sequence: crc, Lba, App; + * if a CRC error is found, the subsequent checks are not performed + */ + v_xchg->scsi_cmnd_info.result = + UNF_SCSI_STATUS(SCSI_CHECK_CONDITION); + dif_control = &v_pkg->dif_control; + + if (v_pkg->status_sub_code == 0) { + UNF_GET_DIF_ERROR_LEVEL1(v_xchg, dif_control, 0, + sense_code, DRV_DIF_CRC_ERR); + + UNF_GET_DIF_ERROR_LEVEL2(v_xchg, dif_control, 0, + sense_code, DRV_DIF_LBA_ERR); + + UNF_GET_DIF_ERROR_LEVEL3(v_xchg, dif_control, 0, + sense_code, DRV_DIF_APP_ERR); + + if (sense_code == 0) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Unexpected DIF unwonted, operation_code(0x%x) actual DIF(0x%llx) expected DIF(0x%llx)", + v_xchg->dif_control.protect_opcode, + *(unsigned long long *) + &dif_control->actual_dif[0], + *(unsigned long long *) + &dif_control->expected_dif[0]); + } + } else { + sense_code = (unsigned short)v_pkg->status_sub_code; + } + + sense_data = (unsigned char *) + v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu; + memset(sense_data, 0, SCSI_SENSE_DATA_LEN); + sense_data[0] = 0x70; /* response code */ + sense_data[2] = ILLEGAL_REQUEST; /* sense key:0x05; */ + sense_data[7] = 0x7; /* additional sense length */ + sense_data[12] = (unsigned char)(sense_code >> 8); + sense_data[13] = (unsigned char)sense_code; + + /* valid sense data length snscode[13] */ + return RETURN_OK; +} + +static unsigned int unf_io_under_flow_handler(struct unf_xchg_s *v_xchg, + struct unf_frame_pkg_s *v_pkg, + unsigned int v_status) +{ + /* under flow: residlen > 0 */ + UNF_CHECK_VALID(0x1317, TRUE, v_xchg, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x1318, TRUE, v_pkg, return UNF_RETURN_ERROR); + + if ((v_xchg->fcp_cmnd.cdb[0] != SCSIOPC_REPORT_LUN) && + (v_xchg->fcp_cmnd.cdb[0] != SCSIOPC_INQUIRY)) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_INFO, + "[info]IO under flow: SID(0x%x) exchange(0x%p) com status(0x%x) up_status(0x%x) DID(0x%x) hot_pool_tag(0x%x) response SID(0x%x)", + v_xchg->sid, v_xchg, v_pkg->status, v_status, + 
v_xchg->did, v_xchg->hot_pool_tag, + v_pkg->residus_len); + } + + v_xchg->resid_len = (int)v_pkg->residus_len; + (void)unf_io_success_handler(v_xchg, v_pkg, v_status); + + return RETURN_OK; +} + +void unf_complete_cmnd(struct unf_scsi_cmd_s *v_scsi_cmnd, unsigned int result) +{ + /* + * Exception during process Que_CMND + * 1. L_Port == NULL; + * 2. L_Port == removing; + * 3. R_Port == NULL; + * 4. Xchg == NULL. + */ + UNF_CHECK_VALID(0x1319, TRUE, UNF_GET_CMND_DONE_FUNC(v_scsi_cmnd), + return); + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_INFO, + "[info]Command(0x%p), Result(0x%x).", v_scsi_cmnd, result); + + UNF_SET_CMND_RESULT(v_scsi_cmnd, result); + + /* struct unf_scsi_cmd_s->pfn_done -->> unf_scsi_done */ + UNF_DONE_SCSI_CMND(v_scsi_cmnd); +} + +static inline void unf_bind_xchg_scsi_cmd(struct unf_xchg_s *v_xchg, + struct unf_scsi_cmd_s *v_scsi_cmnd) +{ + struct unf_scsi_cmd_info_s *scsi_cmnd_info = NULL; + + scsi_cmnd_info = &v_xchg->scsi_cmnd_info; + + /* UNF_SCSI_CMND_INFO <<-- UNF_SCSI_CMND */ + scsi_cmnd_info->err_code_table = + UNF_GET_ERR_CODE_TABLE(v_scsi_cmnd); + scsi_cmnd_info->err_code_table_cout = + UNF_GET_ERR_CODE_TABLE_COUNT(v_scsi_cmnd); + scsi_cmnd_info->pfn_done = UNF_GET_CMND_DONE_FUNC(v_scsi_cmnd); + scsi_cmnd_info->scsi_cmnd = UNF_GET_HOST_CMND(v_scsi_cmnd); + scsi_cmnd_info->sense_buf = + (char *)UNF_GET_SENSE_BUF_ADDR(v_scsi_cmnd); + /* unf_get_frame_entry_buf */ + scsi_cmnd_info->pfn_unf_get_sgl_entry_buf = + UNF_GET_SGL_ENTRY_BUF_FUNC(v_scsi_cmnd); + scsi_cmnd_info->sgl = UNF_GET_CMND_SGL(v_scsi_cmnd); + scsi_cmnd_info->time_out = v_scsi_cmnd->time_out; + scsi_cmnd_info->entry_cnt = v_scsi_cmnd->entry_count; + scsi_cmnd_info->port_id = (unsigned int)v_scsi_cmnd->port_id; + scsi_cmnd_info->scsi_id = UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd); +} + +unsigned int unf_ini_scsi_completed(void *v_lport, + struct unf_frame_pkg_s *v_pkg) +{ + struct unf_lport_s *lport = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_fcp_cmnd_s *fcp_cmnd = NULL; + unsigned int control; + unsigned short xchg_tag; + unsigned int ret; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x1323, TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x1324, TRUE, v_pkg, return UNF_RETURN_ERROR); + + lport = (struct unf_lport_s *)v_lport; + xchg_tag = + (unsigned short)v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]; + + /* 1. Find Exchange Context */ + xchg = unf_cm_lookup_xchg_by_tag(v_lport, (unsigned short)xchg_tag); + if (unlikely(!xchg)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) can not find exchange by tag(0x%x)", + lport->port_id, lport->nport_id, xchg_tag); + + /* NOTE: return directly */ + return UNF_RETURN_ERROR; + } + + /* 2. Consistency check */ + UNF_CHECK_ALLOCTIME_VALID(lport, xchg_tag, xchg, + v_pkg->private[PKG_PRIVATE_XCHG_ALLOC_TIME], + xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]); + + /* 3. Increase ref_cnt for exchange protecting */ + ret = unf_xchg_ref_inc(xchg, INI_RESPONSE_DONE); /* hold */ + UNF_CHECK_VALID(0x1325, TRUE, (ret == RETURN_OK), + return UNF_RETURN_ERROR); + + fcp_cmnd = &xchg->fcp_cmnd; + control = fcp_cmnd->control; + control = UNF_GET_TASK_MGMT_FLAGS(control); + + /* 4. Cancel timer if necessary */ + if (xchg->scsi_cmnd_info.time_out != 0) + lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer(xchg); + + /* 5. 
process scsi TMF if necessary */ + if (control != 0) { + unf_process_scsi_mgmt_result(v_pkg, xchg); + unf_xchg_ref_dec(xchg, INI_RESPONSE_DONE); /* cancel hold */ + + /* NOTE: return directly */ + return RETURN_OK; + } + + /* 6. Xchg Abort state check */ + spin_lock_irqsave(&xchg->xchg_state_lock, flag); + if (INI_IO_STATE_UPABORT & xchg->io_state) { + spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_WARN, + "[warn]Port(0x%x) find exchange(%p) state(0x%x) has been aborted", + lport->port_id, xchg, xchg->io_state); + + /* NOTE: release exchange during SCSI ABORT(ABTS) */ + unf_xchg_ref_dec(xchg, INI_RESPONSE_DONE); /* cancel hold */ + + return ret; + } + spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); + + /* + * 7. INI SCSI CMND Status process + * set exchange->result ---to--->>> scsi_result + */ + ret = unf_ini_status_handle(xchg, v_pkg); + + /* 8. NOTE: release exchange if necessary */ + unf_cm_free_xchg(lport, xchg); + + /* 9. dec exch ref_cnt */ + /* cancel hold: release resource now */ + unf_xchg_ref_dec(xchg, INI_RESPONSE_DONE); + + return ret; +} + +unsigned int unf_hardware_start_io(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_pkg) +{ + if (unlikely(!v_lport->low_level_func.service_op.pfn_unf_cmnd_send)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) low level send scsi function is NULL", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + return v_lport->low_level_func.service_op.pfn_unf_cmnd_send( + v_lport->fc_port, + v_pkg); +} + +struct unf_rport_s *unf_find_rport_by_scsi_id( + struct unf_lport_s *v_lport, + struct unf_ini_error_code_s *v_err_code_table, + unsigned int v_err_code_table_cout, + unsigned int v_scsi_id, + unsigned int *v_scsi_result) +{ + struct unf_rport_scsi_id_image_s *scsi_image_table = NULL; + struct unf_wwpn_rport_info_s *wwpn_rport_info = NULL; + struct unf_rport_s *rport = NULL; + unsigned long flags = 0; + + /* scsi_table -> session_table -> image_table */ + scsi_image_table = &v_lport->rport_scsi_table; + + /* 1. Scsi_Id validity check */ + if (unlikely(v_scsi_id >= scsi_image_table->max_scsi_id)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Input scsi_id(0x%x) bigger than max_scsi_id(0x%x).", + v_scsi_id, scsi_image_table->max_scsi_id); + + *v_scsi_result = unf_get_uplevel_cmnd_errcode( + v_err_code_table, + v_err_code_table_cout, + UNF_IO_SOFT_ERR); /* did_soft_error */ + + return NULL; + } + + /* 2. 
GetR_Port_Info/R_Port: use Scsi_Id find from L_Port's + * Rport_Scsi_Table (image table) + */ + spin_lock_irqsave(&scsi_image_table->scsi_image_table_lock, flags); + wwpn_rport_info = &scsi_image_table->wwn_rport_info_table[v_scsi_id]; + rport = wwpn_rport_info->rport; + spin_unlock_irqrestore(&scsi_image_table->scsi_image_table_lock, flags); + + if (unlikely(!rport)) { + *v_scsi_result = unf_get_uplevel_cmnd_errcode( + v_err_code_table, + v_err_code_table_cout, + /* did_not_connect */ + UNF_IO_PORT_LOGOUT); + + return NULL; + } + + return rport; +} + +static unsigned int unf_build_xchg_fcp_cmnd(struct unf_fcp_cmnd_s *v_fcp_cmnd, + struct unf_scsi_cmd_s *v_scsi_cmnd) +{ + /* SCSI_CMND -->> FCP_CMND */ + if (UNF_GET_DATA_DIRECTION(v_scsi_cmnd) == DMA_TO_DEVICE) { + v_fcp_cmnd->control = UNF_FCP_WR_DATA; + } else if (UNF_GET_DATA_DIRECTION(v_scsi_cmnd) == DMA_FROM_DEVICE) { + v_fcp_cmnd->control = UNF_FCP_RD_DATA; + } else { + /* DMA Direction None */ + v_fcp_cmnd->control = 0; + } + + memcpy(v_fcp_cmnd->cdb, &UNF_GET_FCP_CMND(v_scsi_cmnd), + v_scsi_cmnd->cmnd_len); + + if (((v_fcp_cmnd->control == UNF_FCP_WR_DATA) && + (IS_READ_COMMAND(v_fcp_cmnd->cdb[0]))) || + ((v_fcp_cmnd->control == UNF_FCP_RD_DATA) && + (IS_WRITE_COMMAND(v_fcp_cmnd->cdb[0])))) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MINOR, + "Scsi command direction inconsistent, CDB[0](0x%x), direction(0x%x).", + v_fcp_cmnd->cdb[0], v_fcp_cmnd->control); + + return UNF_RETURN_ERROR; + } + + memcpy(v_fcp_cmnd->lun, v_scsi_cmnd->pc_lun_id, + sizeof(v_fcp_cmnd->lun)); + + unf_big_end_to_cpu((void *)v_fcp_cmnd->cdb, + sizeof(v_fcp_cmnd->cdb)); + v_fcp_cmnd->data_length = UNF_GET_DATA_LEN(v_scsi_cmnd); + + return RETURN_OK; +} + +static void unf_adjust_xchg_len(struct unf_xchg_s *v_xchg, + unsigned int v_scsi_cmnd) +{ + switch (v_scsi_cmnd) { + case SCSIOPC_REQUEST_SENSE: /* requires different buffer */ + v_xchg->data_len = SCSI_SENSE_DATA_LEN; + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MINOR, + "Request Sense new."); + break; + case SCSIOPC_TEST_UNIT_READY: + case SCSIOPC_RESERVE: + case SCSIOPC_RELEASE: + case SCSIOPC_START_STOP_UNIT: + v_xchg->data_len = 0; + break; + default: + break; + } +} + +static void unf_copy_dif_control(struct unf_dif_control_info_s *v_dif_control, + struct unf_scsi_cmd_s *v_scsi_cmnd) +{ + v_dif_control->fcp_dl = v_scsi_cmnd->dif_control.fcp_dl; + v_dif_control->protect_opcode = + v_scsi_cmnd->dif_control.protect_opcode; + v_dif_control->start_lba = v_scsi_cmnd->dif_control.start_lba; + v_dif_control->app_tag = v_scsi_cmnd->dif_control.app_tag; + + v_dif_control->flags = v_scsi_cmnd->dif_control.flags; + v_dif_control->dif_sge_count = + v_scsi_cmnd->dif_control.dif_sge_count; + v_dif_control->dif_sgl = v_scsi_cmnd->dif_control.dif_sgl; +} + +static void unf_adjsut_dif_pci_transfer_len(struct unf_xchg_s *v_xchg, + unsigned int direction) +{ + struct unf_dif_control_info_s *dif_control = NULL; + unsigned int sector_size = 512; + + dif_control = &v_xchg->dif_control; + + if (dif_control->protect_opcode == UNF_DIF_ACTION_NONE) + return; + + switch (dif_control->protect_opcode & UNF_DIF_ACTION_MASK) { + case UNF_DIF_ACTION_INSERT: + if (direction == DMA_TO_DEVICE) { + /* write IO,insert,Indicates that data with DIF is + * transmitted over the link. + */ + dif_control->fcp_dl = + v_xchg->data_len + + UNF_CAL_BLOCK_CNT(v_xchg->data_len, + sector_size) * + UNF_DIF_AREA_SIZE; + } else { + /* read IO,insert,Indicates that the internal DIf is + * carried, and the link does not carry the DIf. 
+ */ + dif_control->fcp_dl = v_xchg->data_len; + } + break; + case UNF_DIF_ACTION_VERIFY_AND_DELETE: + if (direction == DMA_TO_DEVICE) { + /* write IO,Delete,Indicates that the internal DIf is + * carried, and the link does not carry the DIf. + */ + dif_control->fcp_dl = v_xchg->data_len; + } else { + /* read IO,Delete,Indicates that data with DIF is + * carried on the link and does not contain DIF + * on internal. + */ + dif_control->fcp_dl = + v_xchg->data_len + + UNF_CAL_BLOCK_CNT(v_xchg->data_len, + sector_size) * + UNF_DIF_AREA_SIZE; + } + break; + case UNF_DIF_ACTION_VERIFY_AND_FORWARD: + dif_control->fcp_dl = + v_xchg->data_len + + UNF_CAL_BLOCK_CNT(v_xchg->data_len, sector_size) * + UNF_DIF_AREA_SIZE; + break; + default: + dif_control->fcp_dl = v_xchg->data_len; + break; + } + + v_xchg->fcp_cmnd.data_length = dif_control->fcp_dl; +} + +static int unf_save_scsi_cmnd_to_xchg(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg, + struct unf_scsi_cmd_s *v_scsi_cmnd) +{ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = v_rport; + struct unf_xchg_s *xchg = v_xchg; + unsigned int result; + + v_scsi_cmnd->driver_scribble = (void *)xchg->start_jif; + xchg->rport = rport; + xchg->rport_bind_jifs = rport->rport_alloc_jifs; + + if (lport->low_level_func.xchg_mgr_type == + UNF_LOW_LEVEL_MGR_TYPE_PASSTIVE) + xchg->ox_id = xchg->hot_pool_tag; + + /* Build Xchg SCSI_CMND info */ + unf_bind_xchg_scsi_cmd(xchg, v_scsi_cmnd); + + xchg->data_len = UNF_GET_DATA_LEN(v_scsi_cmnd); + xchg->data_direction = UNF_GET_DATA_DIRECTION(v_scsi_cmnd); + xchg->sid = lport->nport_id; + xchg->did = rport->nport_id; + xchg->private[PKG_PRIVATE_XCHG_RPORT_INDEX] = rport->rport_index; + xchg->world_id = v_scsi_cmnd->world_id; + xchg->cmnd_sn = v_scsi_cmnd->cmnd_sn; + xchg->scsi_id = v_scsi_cmnd->scsi_id; + + /* Build Xchg fcp_cmnd */ + result = unf_build_xchg_fcp_cmnd(&xchg->fcp_cmnd, v_scsi_cmnd); + if (unlikely(result != RETURN_OK)) + return UNF_RETURN_ERROR; + + unf_adjust_xchg_len(xchg, UNF_GET_FCP_CMND(v_scsi_cmnd)); + + /* Dif (control) info */ + unf_copy_dif_control(&xchg->dif_control, v_scsi_cmnd); + memcpy(&xchg->dif_info, &v_scsi_cmnd->dif_info, + sizeof(struct dif_info_s)); + unf_adjsut_dif_pci_transfer_len(xchg, + UNF_GET_DATA_DIRECTION(v_scsi_cmnd)); + + /* single sgl info */ + if ((xchg->data_direction != DMA_NONE) && + (UNF_GET_CMND_SGL(v_scsi_cmnd))) { + xchg->req_sgl_info.sgl = UNF_GET_CMND_SGL(v_scsi_cmnd); + /* Save the sgl header for easy location and printing. 
*/ + xchg->req_sgl_info.sgl_start = xchg->req_sgl_info.sgl; + xchg->req_sgl_info.req_index = 0; + xchg->req_sgl_info.entry_index = 0; + } + + if (v_scsi_cmnd->dif_control.dif_sgl) { + xchg->dif_sgl_info.sgl = UNF_INI_GET_DIF_SGL(v_scsi_cmnd); + xchg->dif_sgl_info.entry_index = 0; + xchg->dif_sgl_info.req_index = 0; + xchg->dif_sgl_info.sgl_start = xchg->dif_sgl_info.sgl; + } + + return RETURN_OK; +} + +static int unf_send_fcp_cmnd(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + struct unf_scsi_cmd_info_s *scsi_cmnd_info = NULL; + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = v_rport; + struct unf_xchg_s *xchg = v_xchg; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned int result; + unsigned long flags = 0; + + memcpy(&pkg.dif_control, &xchg->dif_control, + sizeof(struct unf_dif_control_info_s)); + pkg.dif_control.fcp_dl = xchg->dif_control.fcp_dl; + pkg.transfer_len = xchg->data_len; /* Pcie data transfer length */ + pkg.xchg_contex = xchg; + pkg.qos_level = 0; + pkg.entry_count = xchg->scsi_cmnd_info.entry_cnt; + scsi_cmnd_info = &v_xchg->scsi_cmnd_info; + if ((xchg->data_direction == DMA_NONE) || (!scsi_cmnd_info->sgl)) + pkg.entry_count = 0; + + pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME] = + xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]; + pkg.private[PKG_PRIVATE_XCHG_VP_INDEX] = lport->vp_index; + pkg.private[PKG_PRIVATE_XCHG_RPORT_INDEX] = rport->rport_index; + pkg.private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = xchg->hot_pool_tag; + + pkg.fcp_cmnd = &xchg->fcp_cmnd; + pkg.frame_head.csctl_sid = lport->nport_id; + pkg.frame_head.rctl_did = rport->nport_id; + pkg.upper_cmd = xchg->scsi_cmnd_info.scsi_cmnd; + + /* exch->fcp_rsp_id --->>> pkg->buffer_ptr */ + pkg.unf_rsp_pload_bl.buffer_ptr = + (unsigned char *) + v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu; + pkg.unf_rsp_pload_bl.buf_dma_addr = + v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu_phy_addr; + pkg.unf_rsp_pload_bl.length = PAGE_SIZE; + + pkg.frame_head.oxid_rxid = + ((unsigned int)xchg->ox_id << 16 | xchg->rx_id); + + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_EQUIP_ATT, UNF_INFO, + "[info]LPort (0x%p), Nport ID(0x%x) RPort ID(0x%x) direction(0x%x) magic number(0x%x) send IO to OX_ID(0x%x) entry count(0x%x) tag(0x%x)", + lport, lport->nport_id, rport->nport_id, + v_xchg->data_direction, + pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME], + v_xchg->ox_id, pkg.entry_count, xchg->hot_pool_tag); + + atomic_inc(&rport->pending_io_cnt); + if ((rport->tape_support_needed == UNF_TRUE) && + (atomic_read(&rport->pending_io_cnt) <= 3)) { + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + v_xchg->io_state |= INI_IO_STATE_REC_TIMEOUT_WAIT; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + scsi_cmnd_info->abort_timeout = scsi_cmnd_info->time_out; + scsi_cmnd_info->time_out = UNF_REC_TOV; + } + + /* 3. add INI I/O timer if necessary */ + if (scsi_cmnd_info->time_out != 0) { + /* I/O inner timer, do not used at this time */ + lport->xchg_mgr_temp.pfn_unf_xchg_add_timer( + xchg, + scsi_cmnd_info->time_out, + UNF_TIMER_TYPE_REQ_IO); + } + + /* 4. 
R_Port state check */ + if (unlikely((rport->lport_ini_state != UNF_PORT_STATE_LINKUP) || + (rport->rp_state > UNF_RPORT_ST_READY))) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[info]Port(0x%x) RPort(0x%p) NPortId(0x%x) inistate(0x%x): RPort state(0x%x) upper_cmd(0x%p) is not ready", + lport->port_id, rport, rport->nport_id, + rport->lport_ini_state, rport->rp_state, + pkg.upper_cmd); + + result = unf_get_uplevel_cmnd_errcode( + scsi_cmnd_info->err_code_table, + scsi_cmnd_info->err_code_table_cout, + UNF_IO_INCOMPLETE); + scsi_cmnd_info->result = result; + if (scsi_cmnd_info->time_out != 0) + lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer(xchg); + + unf_cm_free_xchg(lport, xchg); + /* DID_IMM_RETRY */ + return RETURN_OK; + } else if (rport->rp_state < UNF_RPORT_ST_READY) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[info]Port(0x%x) RPort(0x%p) NPortId(0x%x) inistate(0x%x): RPort state(0x%x) upper_cmd(0x%p) is not ready", + lport->port_id, rport, rport->nport_id, + rport->lport_ini_state, rport->rp_state, + pkg.upper_cmd); + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flags); + xchg->io_state |= INI_IO_STATE_UPSEND_ERR; /* need retry */ + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags); + + if (unlikely(scsi_cmnd_info->time_out != 0)) + lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer( + (void *)xchg); + + /* Host busy & need scsi retry */ + return UNF_RETURN_ERROR; + } + + /* 5. send scsi_cmnd to FC_LL Driver */ + if (unf_hardware_start_io(lport, &pkg) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port (0x%x) upper_cmd(0x%p) Hardware Send IO failed.", + lport->port_id, pkg.upper_cmd); + + unf_release_esgls(xchg); + result = unf_get_uplevel_cmnd_errcode( + scsi_cmnd_info->err_code_table, + scsi_cmnd_info->err_code_table_cout, + UNF_IO_INCOMPLETE); + scsi_cmnd_info->result = result; + if (scsi_cmnd_info->time_out != 0) + lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer(xchg); + + unf_cm_free_xchg(lport, xchg); + /* SCSI_DONE */ + return RETURN_OK; + } + + return RETURN_OK; +} + +int unf_prefer_to_send_scsi_cmnd(struct unf_xchg_s *v_xchg) +{ + /* + * About INI_IO_STATE_DRABORT: + * 1. Set ABORT tag: Clean L_Port/V_Port Link Down I/O + * with: INI_busy_list, delay_list, delay_transfer_list, wait_list + * + * 2. Set ABORT tag: for target session: + * with: INI_busy_list, delay_list, delay_transfer_list, wait_list + * a. R_Port remove + * b. Send PLOGI_ACC callback + * c. RCVD PLOGI + * d. RCVD LOGO + * + * 3. if set ABORT: prevent send scsi_cmnd to target + */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + int ret; + unsigned long flags = 0; + + lport = v_xchg->lport; + rport = v_xchg->rport; + if (unlikely(!lport || !rport)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%p) or RPort(0x%p) is NULL", lport, + rport); + + /* if happened (never happen): need retry */ + return UNF_RETURN_ERROR; + } + + /* 1. 
inc ref_cnt to protect exchange */ + ret = (int)unf_xchg_ref_inc(v_xchg, INI_SEND_CMND); + if (unlikely(ret != RETURN_OK)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) exhg(%p) exception ref(%d) ", + lport->port_id, v_xchg, + atomic_read(&v_xchg->ref_cnt)); + /* exchange exception, need retry */ + spin_lock_irqsave(&v_xchg->xchg_state_lock, flags); + v_xchg->io_state |= INI_IO_STATE_UPSEND_ERR; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags); + + /* INI_IO_STATE_UPSEND_ERR: Host busy --->>> need retry */ + return UNF_RETURN_ERROR; + } + + /* 2. Xchg Abort state check: Free EXCH if necessary */ + spin_lock_irqsave(&v_xchg->xchg_state_lock, flags); + if (unlikely((v_xchg->io_state & INI_IO_STATE_UPABORT) || + (v_xchg->io_state & INI_IO_STATE_DRABORT))) { + /* Prevent to send: UP_ABORT/DRV_ABORT */ + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags); + v_xchg->scsi_cmnd_info.result = UNF_SCSI_HOST(DID_IMM_RETRY); + + unf_xchg_ref_dec(v_xchg, INI_SEND_CMND); + unf_cm_free_xchg(lport, v_xchg); + + /* + * Release exchange & return directly: + * 1. FC LLDD rcvd ABTS before scsi_cmnd: do nothing + * 2. INI_IO_STATE_UPABORT/INI_IO_STATE_DRABORT: + * discard this cmnd directly + */ + return RETURN_OK; + } + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags); + + /* 3. Send FCP_CMND to FC_LL Driver */ + ret = unf_send_fcp_cmnd(lport, rport, v_xchg); + if (unlikely(ret != RETURN_OK)) { + /* exchange exception, need retry */ + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) send exhg(%p) OX_ID(0x%x) RX_ID(0x%x) to Rport(%p) NPortID(0x%x) state(0x%x) scsi_id(0x%x) failed", + lport->port_id, v_xchg, v_xchg->ox_id, + v_xchg->rx_id, + rport, rport->nport_id, rport->rp_state, + rport->scsi_id); + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flags); + v_xchg->io_state |= INI_IO_STATE_UPSEND_ERR; /* need retry */ + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags); + + /* INI_IO_STATE_UPSEND_ERR: Host busy --->>> need retry */ + unf_cm_free_xchg(lport, v_xchg); + } + + /* 4. dec ref_cnt */ + unf_xchg_ref_dec(v_xchg, INI_SEND_CMND); + + return ret; +} + +struct unf_lport_s *unf_find_lport_by_scsi_cmd( + struct unf_scsi_cmd_s *v_scsi_cmnd) +{ + struct unf_lport_s *lport = NULL; + + /* cmd -->> L_Port */ + lport = (struct unf_lport_s *)UNF_GET_HOST_PORT_BY_CMND(v_scsi_cmnd); + if (unlikely(!lport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Find Port by scsi_cmnd(0x%p) failed", + v_scsi_cmnd); + + /* cmnd -->> scsi_host_id -->> L_Port */ + lport = unf_find_lport_by_scsi_host_id( + UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd)); + } + return lport; +} + +int unf_cm_queue_command(struct unf_scsi_cmd_s *v_scsi_cmnd) +{ + /* SCSI Command --->>> FC FCP Command */ + struct unf_lport_s *lport = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_rport_s *rport = NULL; + struct unf_rport_scsi_id_image_s *scsi_image_table = NULL; + unsigned int result = 0; + int ret; + unsigned long flags = 0; + unsigned int scsi_id; + unsigned int exhg_mgr_type = UNF_XCHG_MGR_TYPE_RANDOM; + + /* 1. Get L_Port */ + lport = unf_find_lport_by_scsi_cmd(v_scsi_cmnd); + + /* + * corresponds to the insertion or removal scenario or + * the remove card scenario. + * This method is used to search for LPort information + * based on SCSI_HOST_ID. + * The Slave alloc is not invoked when LUNs are not scanned. + * Therefore, the Lport cannot be obtained. + * You need to obtain the Lport from the Lport linked list. 
+ * + * FC After Link Up, the first SCSI command is inquiry. + * Before inquiry, SCSI delivers slave_alloc. + */ + if (!lport) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Find Port by scsi cmd(0x%p) failed", + v_scsi_cmnd); + + /* find from ini_error_code_table1 */ + result = unf_get_uplevel_cmnd_errcode( + v_scsi_cmnd->err_code_table, + v_scsi_cmnd->err_code_table_cout, + UNF_IO_NO_LPORT); /* did_not_connect */ + + /* DID_NOT_CONNECT & SCSI_DONE & RETURN_OK(0) & I/O error */ + unf_complete_cmnd(v_scsi_cmnd, result); + return RETURN_OK; + } + + /* Get Local SCSI_Image_table & SCSI_ID */ + scsi_image_table = &lport->rport_scsi_table; + scsi_id = v_scsi_cmnd->scsi_id; + + /* 2. L_Port State check */ + if (unlikely((lport->b_port_removing == UNF_TRUE) || + (lport->b_pcie_linkdown == UNF_TRUE))) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) is removing(%d) or pcielinkdown(%d) and return with scsi_id(0x%x)", + lport->port_id, lport->b_port_removing, + lport->b_pcie_linkdown, + UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd)); + + result = unf_get_uplevel_cmnd_errcode( + v_scsi_cmnd->err_code_table, + v_scsi_cmnd->err_code_table_cout, + UNF_IO_NO_LPORT); /* did_not_connect */ + + UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, (result >> 16)); + + /* DID_NOT_CONNECT & SCSI_DONE & RETURN_OK(0) & I/O error */ + unf_complete_cmnd(v_scsi_cmnd, result); + return RETURN_OK; + } + + /* 3. Get R_Port */ + rport = unf_find_rport_by_scsi_id(lport, + v_scsi_cmnd->err_code_table, + v_scsi_cmnd->err_code_table_cout, + UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd), + &result); + if (unlikely(!rport)) { + /* never happen: do not care */ + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) find RPort by scsi_id(0x%x) failed", + lport->port_id, + UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd)); + + UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, (result >> 16)); + + /* DID_NOT_CONNECT/DID_SOFT_ERROR & SCSI_DONE & + * RETURN_OK(0) & I/O error + */ + unf_complete_cmnd(v_scsi_cmnd, result); + return RETURN_OK; + } + + /* 4. Can't get exchange & retrun host busy, retry by uplevel */ + xchg = (struct unf_xchg_s *)unf_cm_get_free_xchg( + lport, + exhg_mgr_type << 16 | UNF_XCHG_TYPE_INI); + if (unlikely(!xchg)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[err]Port(0x%x) get free exchange for INI IO(0x%x) failed", + lport->port_id, + UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd)); + + /* NOTE: need scsi retry */ + return UNF_RETURN_ERROR; + } + + xchg->scsi_cmnd_info.result = UNF_SCSI_HOST(DID_ERROR); + /* 5. Save the SCSI CMND information in advance. 
*/ + ret = unf_save_scsi_cmnd_to_xchg(lport, rport, xchg, v_scsi_cmnd); + if (unlikely(ret != RETURN_OK)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[err]Port(0x%x) save scsi_cmnd info(0x%x) to exchange failed", + lport->port_id, + UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd)); + + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + xchg->io_state |= INI_IO_STATE_UPSEND_ERR; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + /* INI_IO_STATE_UPSEND_ERR: Don't Do SCSI_DONE, + * need retry I/O + */ + unf_cm_free_xchg(lport, xchg); + /* NOTE: need scsi retry */ + return UNF_RETURN_ERROR; + } + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_INFO, + "[info]Get exchange(0x%p) OX_ID(0x%x) RX_ID(0x%x) hot_pool_tag(0x%x) for Pcmd:%p,Cmdsn:0x%lx,WorldId:%u", + xchg, xchg->ox_id, xchg->rx_id, + xchg->hot_pool_tag, v_scsi_cmnd->upper_cmnd, + (unsigned long)v_scsi_cmnd->cmnd_sn, + v_scsi_cmnd->world_id); + /* 6. Send SCSI CMND */ + ret = unf_prefer_to_send_scsi_cmnd(xchg); + return ret; +} diff --git a/drivers/scsi/huawei/hifc/unf_io.h b/drivers/scsi/huawei/hifc/unf_io.h new file mode 100644 index 000000000000..1b8d0daa1412 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_io.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __UNF_IO_H__ +#define __UNF_IO_H__ + +#define UNF_MAX_TARGET_NUMBER 2048 +#define UNF_DEFAULT_MAX_LUN 0xFFFF +#define UNF_MAX_DMA_SEGS 0x400 +#define UNF_MAX_SCSI_CMND_LEN 16 +#define UNF_MAX_SECTORS 0xffff +#define UNF_MAX_BUS_CHANNEL 0 +#define UNF_DMA_BOUNDARY 0xffffffffffffffff +#define UNF_MAX_CMND_PER_LUN 64 /* LUN max command */ + +#define NO_SENSE 0x00 +#define RECOVERED_ERROR 0x01 +#define NOT_READY 0x02 +#define MEDIUM_ERROR 0x03 +#define HARDWARE_ERROR 0x04 +#define ILLEGAL_REQUEST 0x05 +#define UNIT_ATTENTION 0x06 +#define DATA_PROTECT 0x07 +#define BLANK_CHECK 0x08 +#define COPY_ABORTED 0x0a +#define ABORTED_COMMAND 0x0b +#define VOLUME_OVERFLOW 0x0d +#define MISCOMPARE 0x0e + +#define UNF_GET_SCSI_HOST_ID_BY_CMND(pcmd) ((pcmd)->scsi_host_id) +#define UNF_GET_SCSI_ID_BY_CMND(pcmd) ((pcmd)->scsi_id) +#define UNF_GET_HOST_PORT_BY_CMND(pcmd) ((pcmd)->drv_private) +#define UNF_GET_FCP_CMND(pcmd) ((pcmd)->pcmnd[0]) +#define UNF_GET_DATA_LEN(pcmd) ((pcmd)->transfer_len) +#define UNF_GET_DATA_DIRECTION(pcmd) ((pcmd)->data_direction) + +#define UNF_GET_HOST_CMND(pcmd) ((pcmd)->upper_cmnd) +#define UNF_GET_CMND_DONE_FUNC(pcmd) ((pcmd)->pfn_done) +#define UNF_GET_SGL_ENTRY_BUF_FUNC(pcmd) ((pcmd)->pfn_unf_ini_get_sgl_entry) +#define UNF_GET_SENSE_BUF_ADDR(pcmd) ((pcmd)->sense_buf) +#define UNF_GET_ERR_CODE_TABLE(pcmd) ((pcmd)->err_code_table) +#define UNF_GET_ERR_CODE_TABLE_COUNT(pcmd) ((pcmd)->err_code_table_cout) + +#define UNF_SET_HOST_CMND(pcmd, host_cmd) ((pcmd)->upper_cmnd = (host_cmd)) +#define UNF_SET_CMND_DONE_FUNC(pcmd, pfn) ((pcmd)->pfn_done = (pfn)) + +#define UNF_SET_RESID(pcmd, id_len) ((pcmd)->resid = (id_len)) +#define UNF_SET_CMND_RESULT(pcmd, uiresult) ((pcmd)->result = ((int)uiresult)) + +#define UNF_DONE_SCSI_CMND(pcmd) ((pcmd)->pfn_done(pcmd)) + +#define UNF_GET_CMND_SGL(pcmd) ((pcmd)->sgl) +#define UNF_INI_GET_DIF_SGL(pcmd) ((pcmd)->dif_control.dif_sgl) + +unsigned int unf_ini_scsi_completed(void *v_lport, + struct unf_frame_pkg_s *v_pkg); +unsigned int unf_ini_get_sgl_entry(void *v_pkg, char **v_buf, + unsigned int *v_buf_len); +unsigned int unf_ini_get_dif_sgl_entry(void *v_pkg, char **v_buf, + unsigned int 
*v_buf_len); + +void unf_complete_cmnd(struct unf_scsi_cmd_s *v_scsi_cmnd, unsigned int result); +void unf_done_ini_xchg(struct unf_xchg_s *v_xchg); +unsigned int unf_tmf_timeout_recovery_special(void *v_rport, void *v_xchg); +void unf_abts_timeout_recovery_default(void *v_rport, void *v_xchg); +int unf_cm_queue_command(struct unf_scsi_cmd_s *v_scsi_cmnd); +int unf_cm_eh_abort_handler(struct unf_scsi_cmd_s *v_scsi_cmnd); +int unf_cm_eh_device_reset_handler(struct unf_scsi_cmd_s *v_scsi_cmnd); +int unf_cm_target_reset_handler(struct unf_scsi_cmd_s *v_scsi_cmnd); +int unf_cm_bus_reset_handler(struct unf_scsi_cmd_s *v_scsi_cmnd); +struct unf_rport_s *unf_find_rport_by_scsi_id( + struct unf_lport_s *v_lport, + struct unf_ini_error_code_s *v_err_code_table, + unsigned int v_err_code_table_cout, + unsigned int v_scsi_id, + unsigned int *v_scsi_result); + +struct unf_lport_s *unf_find_lport_by_scsi_cmd( + struct unf_scsi_cmd_s *v_scsi_cmnd); +void unf_tmf_abnormal_recovery(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg); +unsigned int unf_get_uplevel_cmnd_errcode( + struct unf_ini_error_code_s *v_err_table, + unsigned int v_err_table_count, + unsigned int v_drv_err_code); + +#endif diff --git a/drivers/scsi/huawei/hifc/unf_io_abnormal.c b/drivers/scsi/huawei/hifc/unf_io_abnormal.c new file mode 100644 index 000000000000..f96bd0755b23 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_io_abnormal.c @@ -0,0 +1,926 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#include "unf_log.h" +#include "unf_exchg.h" +#include "unf_rport.h" +#include "unf_io.h" +#include "unf_portman.h" +#include "unf_service.h" +#include "unf_io_abnormal.h" + +static int unf_send_abts_success(struct unf_lport_s *v_lport, + struct unf_xchg_s *v_xchg, + struct unf_scsi_cmd_s *v_scsi_cmnd, + unsigned int time_out_value) +{ + int wait_marker = UNF_TRUE; + struct unf_rport_scsi_id_image_s *scsi_image_table = NULL; + unsigned int scsi_id; + unsigned int ret; + unsigned long flag = 0; + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + wait_marker = (v_xchg->abts_state & MARKER_STS_RECEIVED) ? 
+ UNF_FALSE : UNF_TRUE; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + if (wait_marker) { + if (down_timeout( + &v_xchg->task_sema, + (long long)msecs_to_jiffies(time_out_value))) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, + UNF_WARN, + "[warn]Port(0x%x) recv abts marker timeout,Exch(0x%p) OX_ID(0x%x 0x%x) RX_ID(0x%x)", + v_lport->port_id, v_xchg, + v_xchg->ox_id, v_xchg->hot_pool_tag, + v_xchg->rx_id); + + /* Cancel abts rsp timer when sema timeout */ + v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer( + (void *)v_xchg); + + /* Cnacel the flag of INI_IO_STATE_UPABORT and + * process the io in TMF + */ + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + v_xchg->io_state &= ~INI_IO_STATE_UPABORT; + v_xchg->io_state |= INI_IO_STATE_TMF_ABORT; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + return UNF_SCSI_ABORT_FAIL; + } + } else { + v_xchg->ucode_abts_state = UNF_IO_SUCCESS; + } + + scsi_image_table = &v_lport->rport_scsi_table; + scsi_id = v_scsi_cmnd->scsi_id; + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + if ((v_xchg->ucode_abts_state == UNF_IO_SUCCESS) || + (v_xchg->scsi_cmnd_info.result == UNF_IO_ABORT_PORT_REMOVING)) { + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]Port(0x%x) Send ABTS succeed and recv marker Exch(0x%p) OX_ID(0x%x) RX_ID(0x%x) marker status(0x%x)", + v_lport->port_id, v_xchg, + v_xchg->ox_id, v_xchg->rx_id, + v_xchg->ucode_abts_state); + ret = DID_RESET; + UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, ret); + unf_complete_cmnd(v_scsi_cmnd, DID_RESET << 16); + return UNF_SCSI_ABORT_SUCCESS; + } + + v_xchg->io_state &= ~INI_IO_STATE_UPABORT; + v_xchg->io_state |= INI_IO_STATE_TMF_ABORT; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + /* Cancel abts rsp timer when sema timeout */ + v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)v_xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Port(0x%x) send ABTS failed. Exch(0x%p) oxid(0x%x) hot_tag(0x%x) ret(0x%x) v_xchg->io_state (0x%x)", + v_lport->port_id, v_xchg, v_xchg->ox_id, + v_xchg->hot_pool_tag, + v_xchg->scsi_cmnd_info.result, v_xchg->io_state); + + /* return fail and then enter TMF */ + return UNF_SCSI_ABORT_FAIL; +} + +static int unf_ini_abort_cmnd(struct unf_lport_s *v_lport, + struct unf_xchg_s *v_xchg, + struct unf_scsi_cmd_s *v_scsi_cmnd) +{ + /* + * About INI_IO_STATE_UPABORT: + * + * 1. Check: AC power down + * 2. Check: L_Port destroy + * 3. Check: I/O XCHG timeout + * 4. Set ABORT: send ABTS + * 5. Set ABORT: LUN reset + * 6. Set ABORT: Target reset + * 7. Check: Prevent to send I/O to target (UNF_PreferToSendScsiCmnd) + * 8. Check: Done INI XCHG --->>> do not call scsi_done, return directly + * 9. Check: INI SCSI Complete --->>> + * do not call scsi_done, return directly + */ +#define UNF_RPORT_NOTREADY_WAIT_SEM_TIMEOUT (2000) /* 2s */ + + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + unsigned long flag = 0; + struct unf_rport_scsi_id_image_s *scsi_image_table = NULL; + unsigned int scsi_id; + unsigned int ret; + + unsigned int time_out_value = (unsigned int)UNF_WAIT_SEM_TIMEOUT; + + UNF_CHECK_VALID(0x1335, TRUE, v_lport, return UNF_SCSI_ABORT_FAIL); + lport = v_lport; + + /* 1. Xchg State Set: INI_IO_STATE_UPABORT */ + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + v_xchg->io_state |= INI_IO_STATE_UPABORT; + rport = v_xchg->rport; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + /* 2. 
R_Port check */ + if (unlikely(!rport)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) send ABTS but no RPort, OX_ID(0x%x) RX_ID(0x%x)", + lport->port_id, v_xchg->ox_id, v_xchg->rx_id); + + return UNF_SCSI_ABORT_SUCCESS; + } + + spin_lock_irqsave(&rport->rport_state_lock, flag); + if (unlikely(rport->rp_state != UNF_RPORT_ST_READY)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, + UNF_WARN, + "[warn]Port(0x%x) find RPort's state(0x%x) is not ready but send ABTS also, exchange(0x%p) tag(0x%x)", + lport->port_id, rport->rp_state, + v_xchg, v_xchg->hot_pool_tag); + + /* + * Important: Send ABTS also & update timer + * Purpose: only used for release chip (uCode) resource, + * continue + */ + time_out_value = UNF_RPORT_NOTREADY_WAIT_SEM_TIMEOUT; + } + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* 3. L_Port State check */ + if (unlikely(lport->b_port_removing == UNF_TRUE)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) is removing", lport->port_id); + + v_xchg->io_state &= ~INI_IO_STATE_UPABORT; + + return UNF_SCSI_ABORT_FAIL; + } + + scsi_image_table = &lport->rport_scsi_table; + scsi_id = v_scsi_cmnd->scsi_id; + + /* If pcie linkdown, complete this io and flush all io */ + if (unlikely(lport->b_pcie_linkdown == UNF_TRUE)) { + ret = DID_RESET; + UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, ret); + unf_complete_cmnd(v_scsi_cmnd, DID_RESET << 16); + unf_free_lport_all_xchg(v_lport); + return UNF_SCSI_ABORT_SUCCESS; + } + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_KEVENT, + "[abort]Port(0x%x) Exchg(0x%p) delay(%llu) SID(0x%x) DID(0x%x) wwpn(0x%llx) OxID(0x%x 0x%x) scsi_id(0x%x) lun_id(0x%x) cmdsn(0x%llx)", + lport->port_id, v_xchg, + (unsigned long long)jiffies_to_msecs(jiffies) - + (unsigned long long)jiffies_to_msecs(v_xchg->alloc_jif), + v_xchg->sid, v_xchg->did, rport->port_name, + v_xchg->ox_id, v_xchg->hot_pool_tag, v_scsi_cmnd->scsi_id, + (unsigned int)v_scsi_cmnd->lun_id, v_scsi_cmnd->cmnd_sn); + + /* Init abts marker semaphore */ + sema_init(&v_xchg->task_sema, 0); + + if (v_xchg->scsi_cmnd_info.time_out != 0) + lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer(v_xchg); + + /* Add timer for sending ABTS */ + v_lport->xchg_mgr_temp.pfn_unf_xchg_add_timer( + (void *)v_xchg, + (unsigned long)UNF_WAIT_ABTS_RSP_TIMEOUT, + UNF_TIMER_TYPE_INI_ABTS); + + /* 4. Send INI ABTS CMND */ + if (unf_send_abts(lport, v_xchg) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) Send ABTS failed. 
Exch(0x%p) OX_ID(0x%x 0x%x) RX_ID(0x%x)", + lport->port_id, v_xchg, + v_xchg->ox_id, v_xchg->hot_pool_tag, + v_xchg->rx_id); + + /* Cancel timer when sending ABTS failed */ + v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer( + (void *)v_xchg); + + /* Cnacel the flag of INI_IO_STATE_UPABORT + * and process the io in TMF + */ + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + v_xchg->io_state &= ~INI_IO_STATE_UPABORT; + v_xchg->io_state |= INI_IO_STATE_TMF_ABORT; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + return UNF_SCSI_ABORT_FAIL; + } + + return unf_send_abts_success(lport, v_xchg, v_scsi_cmnd, + time_out_value); +} + +static void unf_flush_ini_resp_que(struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x1335, TRUE, v_lport, return); + + if (v_lport->low_level_func.service_op.pfn_unf_flush_ini_resp_que) + (void)v_lport->low_level_func.service_op.pfn_unf_flush_ini_resp_que(v_lport->fc_port); +} + +int unf_cm_eh_abort_handler(struct unf_scsi_cmd_s *v_scsi_cmnd) +{ + /* + * SCSI ABORT Command --->>> FC ABTS Command + * If return ABORT_FAIL then enter TMF process + */ + struct unf_lport_s *lport = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_rport_s *rport = NULL; + struct unf_lport_s *xchg_lport = NULL; + int ret; + unsigned long flag = 0; + + /* 1. Get L_Port: Point to Scsi_host */ + lport = unf_find_lport_by_scsi_cmd(v_scsi_cmnd); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Can't find port by scsi host id(0x%x)", + UNF_GET_SCSI_HOST_ID_BY_CMND(v_scsi_cmnd)); + return UNF_SCSI_ABORT_FAIL; + } + + /* 2. find target Xchg for INI Abort CMND */ + xchg = unf_cm_lookup_xchg_by_cmnd_sn(lport, v_scsi_cmnd->cmnd_sn, + v_scsi_cmnd->world_id); + if (unlikely(!xchg)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_ABNORMAL, + UNF_WARN, + "[warn]Port(0x%x) can't find exchange by Cmdsn(0x%lx)", + lport->port_id, + (unsigned long)v_scsi_cmnd->cmnd_sn); + + unf_flush_ini_resp_que(lport); + + return UNF_SCSI_ABORT_SUCCESS; + } + + /* 3. increase ref_cnt to protect exchange */ + ret = (int)unf_xchg_ref_inc(xchg, INI_EH_ABORT); + if (unlikely(ret != RETURN_OK)) { + unf_flush_ini_resp_que(lport); + return UNF_SCSI_ABORT_SUCCESS; + } + + v_scsi_cmnd->upper_cmnd = xchg->scsi_cmnd_info.scsi_cmnd; + + xchg->debug_hook = UNF_TRUE; + + /* 4. Exchang L_Port/R_Port Get & check */ + spin_lock_irqsave(&xchg->xchg_state_lock, flag); + xchg_lport = xchg->lport; + rport = xchg->rport; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); + + if (unlikely(!xchg_lport || !rport)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Exchange(0x%p)'s L_Port or R_Port is NULL, state(0x%x)", + xchg, xchg->io_state); + + unf_xchg_ref_dec(xchg, INI_EH_ABORT); + + if (!xchg_lport) + return UNF_SCSI_ABORT_FAIL; /* for L_Port */ + return UNF_SCSI_ABORT_SUCCESS; /* for R_Port */ + } + + /* 5. Send INI Abort Cmnd */ + ret = unf_ini_abort_cmnd(xchg_lport, xchg, v_scsi_cmnd); + + /* 6. 
decrease exchange ref_cnt */ + unf_xchg_ref_dec(xchg, INI_EH_ABORT); + + return ret; +} + +static unsigned int unf_tmf_timeout_recovery_default(void *v_rport, + void *v_xchg) +{ + struct unf_lport_s *lport = NULL; + unsigned long flag = 0; + struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_xchg; + struct unf_rport_s *rport = (struct unf_rport_s *)v_rport; + + lport = xchg->lport; + UNF_CHECK_VALID(0x4614, UNF_TRUE, lport, return UNF_RETURN_ERROR); + + spin_lock_irqsave(&rport->rport_state_lock, flag); + unf_rport_state_ma(rport, UNF_EVENT_RPORT_LOGO); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + unf_rport_enter_logo(lport, rport); + return RETURN_OK; +} + +void unf_abts_timeout_recovery_default(void *v_rport, void *v_xchg) +{ + struct unf_lport_s *lport = NULL; + unsigned long flag = 0; + struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_xchg; + struct unf_rport_s *rport = (struct unf_rport_s *)v_rport; + + lport = xchg->lport; + UNF_CHECK_VALID(0x4614, UNF_TRUE, lport, return); + + spin_lock_irqsave(&xchg->xchg_state_lock, flag); + if (INI_IO_STATE_DONE & xchg->io_state) { + spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); + + return; + } + spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); + if (xchg->rport_bind_jifs != rport->rport_alloc_jifs) + return; + + spin_lock_irqsave(&rport->rport_state_lock, flag); + unf_rport_state_ma(rport, UNF_EVENT_RPORT_LOGO); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + unf_rport_enter_logo(lport, rport); +} + +unsigned int unf_tmf_timeout_recovery_special(void *v_rport, void *v_xchg) +{ + /* Do port reset or R_Port LOGO */ + int ret = UNF_RETURN_ERROR; + struct unf_lport_s *lport = NULL; + struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_xchg; + struct unf_rport_s *rport = (struct unf_rport_s *)v_rport; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_rport, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_xchg, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, xchg->lport, + return UNF_RETURN_ERROR); + + lport = xchg->lport->root_lport; + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, lport, + return UNF_RETURN_ERROR); + + /* 1. TMF response timeout & Marker STS timeout */ + if (!(xchg->tmf_state & + (MARKER_STS_RECEIVED | TMF_RESPONSE_RECEIVED))) { + /* TMF timeout & marker timeout */ + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) receive marker status timeout and do recovery", + lport->port_id); + + /* Do port reset */ + ret = unf_cm_reset_port(lport->port_id); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) do reset failed", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + return RETURN_OK; + } + + /* 2. 
default case: Do LOGO process */ + unf_tmf_timeout_recovery_default(rport, xchg); + + return RETURN_OK; +} + +void unf_tmf_abnormal_recovery(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + /* + * for device(lun)/target(session) reset: + * Do port reset or R_Port LOGO + */ + if (v_lport->pfn_unf_tmf_abnormal_recovery) + v_lport->pfn_unf_tmf_abnormal_recovery((void *)v_rport, + (void *)v_xchg); +} + +static void unf_build_task_mgmt_fcp_cmnd(struct unf_fcp_cmnd_s *v_fcp_cmnd, + struct unf_scsi_cmd_s *v_scsi_cmnd, + enum unf_task_mgmt_cmnd_e v_task_mgmt) +{ + UNF_CHECK_VALID(0x1339, UNF_TRUE, v_fcp_cmnd, return); + UNF_CHECK_VALID(0x1340, UNF_TRUE, v_scsi_cmnd, return); + + unf_big_end_to_cpu((void *)v_scsi_cmnd->pc_lun_id, UNF_FCP_LUNID_LEN_8); + (*(unsigned long long *)(v_scsi_cmnd->pc_lun_id)) >>= 8; + memcpy(v_fcp_cmnd->lun, v_scsi_cmnd->pc_lun_id, + sizeof(v_fcp_cmnd->lun)); + + /* + * If the TASK MANAGEMENT FLAGS field is set to a nonzero value, + * the FCP_CDB field, the FCP_DL field, the TASK ATTRIBUTE field, + * the RDDATA bit, and the WRDATA bit shall be ignored and the + * FCP_BIDIRECTIONAL_READ_DL field shall not be + * included in the FCP_CMND IU payload + */ + v_fcp_cmnd->control = UNF_SET_TASK_MGMT_FLAGS(v_task_mgmt); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "SCSI cmnd(0x%x) is task mgmt cmnd. ntrl Flag(LITTLE END) is 0x%x.", + v_task_mgmt, v_fcp_cmnd->control); +} + +int unf_send_scsi_mgmt_cmnd(struct unf_xchg_s *v_xchg, + struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_scsi_cmd_s *v_scsi_cmnd, + enum unf_task_mgmt_cmnd_e v_task_mgnt_cmd_type) +{ + /* + * 1. Device/LUN reset + * 2. Target/Session reset + */ + struct unf_xchg_s *xchg = NULL; + int ret = SUCCESS; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x1341, UNF_TRUE, v_xchg, return FAILED); + UNF_CHECK_VALID(0x1342, UNF_TRUE, v_lport, return FAILED); + UNF_CHECK_VALID(0x1343, UNF_TRUE, v_rport, return FAILED); + UNF_CHECK_VALID(0x1344, UNF_TRUE, v_scsi_cmnd, return FAILED); + UNF_CHECK_VALID(0x1345, UNF_TRUE, + ((v_task_mgnt_cmd_type <= UNF_FCP_TM_TERMINATE_TASK) && + (v_task_mgnt_cmd_type >= UNF_FCP_TM_QUERY_TASK_SET)), + return FAILED); + + xchg = v_xchg; + xchg->lport = v_lport; + xchg->rport = v_rport; + + /* 1. State: Up_Task */ + spin_lock_irqsave(&xchg->xchg_state_lock, flag); + xchg->io_state |= INI_IO_STATE_UPTASK; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); + + if (v_lport->low_level_func.xchg_mgr_type == + UNF_LOW_LEVEL_MGR_TYPE_PASSTIVE) { + xchg->ox_id = xchg->hot_pool_tag; + pkg.frame_head.oxid_rxid = + ((unsigned int)xchg->ox_id << 16) | xchg->rx_id; + } + + /* 2. 
Set TASK MANAGEMENT FLAGS of FCP_CMND to + * the corresponding task management command + */ + unf_build_task_mgmt_fcp_cmnd(&xchg->fcp_cmnd, v_scsi_cmnd, + v_task_mgnt_cmd_type); + + pkg.xchg_contex = xchg; + pkg.private[PKG_PRIVATE_XCHG_RPORT_INDEX] = v_rport->rport_index; + pkg.fcp_cmnd = &xchg->fcp_cmnd; + pkg.private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = xchg->hot_pool_tag; + pkg.frame_head.csctl_sid = v_lport->nport_id; + pkg.frame_head.rctl_did = v_rport->nport_id; + pkg.unf_rsp_pload_bl.buffer_ptr = + (unsigned char *)xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu; + pkg.unf_rsp_pload_bl.buf_dma_addr = + v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu_phy_addr; + pkg.unf_rsp_pload_bl.length = PAGE_SIZE; + pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME] = + v_xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]; + + if (unlikely(v_lport->b_pcie_linkdown == UNF_TRUE)) { + unf_free_lport_all_xchg(v_lport); + return SUCCESS; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "[event]Port(0x%x) send task_cmnd(0x%x) to RPort(0x%x) Hottag(0x%x) lunid(0x%llx)", + v_lport->port_id, v_task_mgnt_cmd_type, + v_rport->nport_id, xchg->hot_pool_tag, + *((unsigned long long *)v_scsi_cmnd->pc_lun_id)); + + /* 3. Init exchange task semaphore */ + sema_init(&xchg->task_sema, 0); + + /* 4. Send Mgmt Task to low-level */ + if (unf_hardware_start_io(v_lport, &pkg) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) send task_cmnd(0x%x) to RPort(0x%x) failed", + v_lport->port_id, v_task_mgnt_cmd_type, + v_rport->nport_id); + + return FAILED; + } + + /* + * semaphore timeout + * + * Code review: The second input parameter needs to + * be converted to jiffies. + * set semaphore after the message is sent successfully. + * The semaphore is returned when the semaphore times out + * or is woken up. + * + * 5. The semaphore is cleared and counted when the Mgmt + * Task message is sent, + * and is Wake Up when the RSP message is received. + * If the semaphore is not Wake Up, the semaphore is + * triggered after timeout. + * That is, no RSP message is received within the timeout period. + */ + if (down_timeout(&xchg->task_sema, + (long long)msecs_to_jiffies((unsigned int)UNF_WAIT_SEM_TIMEOUT))) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) send task_cmnd(0x%x) to RPort(0x%x) timeout scsi id(0x%x) lun id(0x%x)", + v_lport->nport_id, v_task_mgnt_cmd_type, + v_rport->nport_id, + v_scsi_cmnd->scsi_id, + (unsigned int)v_scsi_cmnd->lun_id); + + /* semaphore timeout */ + ret = FAILED; + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + if (v_lport->en_states == UNF_LPORT_ST_RESET) + ret = SUCCESS; + + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + return ret; + } + + /* + * 6. 
NOTE: no timeout (has been waken up) + * Do Scsi_Cmnd(Mgmt Task) result checking + * + * FAILED: with error code or RSP is error + * SUCCESS: others + */ + if (xchg->scsi_cmnd_info.result == UNF_IO_SUCCESS) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]Port(0x%x) send task_cmnd(0x%x) to RPort(0x%x) and receive rsp succeed", + v_lport->nport_id, v_task_mgnt_cmd_type, + v_rport->nport_id); + + ret = SUCCESS; + } else { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) send task_cmnd(0x%x) to RPort(0x%x) and receive rsp failed scsi id(0x%x) lun id(0x%x)", + v_lport->nport_id, v_task_mgnt_cmd_type, + v_rport->nport_id, + v_scsi_cmnd->scsi_id, + (unsigned int)v_scsi_cmnd->lun_id); + + ret = FAILED; + } + + return ret; +} + +int unf_cm_eh_device_reset_handler(struct unf_scsi_cmd_s *v_scsi_cmnd) +{ + /* SCSI Device/LUN Reset Command --->>> FC LUN/Device Reset Command */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int cmnd_result = 0; + int ret = SUCCESS; + + UNF_CHECK_VALID(0x1349, UNF_TRUE, v_scsi_cmnd, return FAILED); + UNF_CHECK_VALID(0x1350, UNF_TRUE, v_scsi_cmnd->pc_lun_id, + return FAILED); + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[event]Enter device/LUN reset handler"); + + /* 1. Get L_Port */ + lport = unf_find_lport_by_scsi_cmd(v_scsi_cmnd); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Can't find port by scsi_host_id(0x%x)", + UNF_GET_SCSI_HOST_ID_BY_CMND(v_scsi_cmnd)); + + return FAILED; + } + + /* 2. L_Port State checking */ + if (unlikely(lport->b_port_removing == UNF_TRUE)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%p) is removing", lport); + + return FAILED; + } + + /* + * 3. Get R_Port: no rport is found or rport is not ready,return ok + * from: L_Port -->> rport_scsi_table (image table) + * -->> rport_info_table + */ + rport = unf_find_rport_by_scsi_id(lport, + v_scsi_cmnd->err_code_table, + v_scsi_cmnd->err_code_table_cout, + UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd), + &cmnd_result); + if (unlikely(!rport)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) Can't find rport by scsi_id(0x%x)", + lport->port_id, + UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd)); + + return SUCCESS; + } + + /* + * 4. Set the I/O of the corresponding LUN to abort. + * + * LUN Reset: set UP_ABORT tag, with: + * INI_Busy_list, IO_Wait_list, + * IO_Delay_list, IO_Delay_transfer_list + */ + unf_cm_xchg_abort_by_lun( + lport, rport, + *((unsigned long long *)v_scsi_cmnd->pc_lun_id), + NULL, UNF_FALSE); + + /* 5. R_Port state check */ + if (unlikely(rport->rp_state != UNF_RPORT_ST_READY)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) RPort(0x%x) state(0x%x) SCSI Command(0x%p), rport is not ready", + lport->port_id, rport->nport_id, + rport->rp_state, v_scsi_cmnd); + + return SUCCESS; + } + + /* 6. Get & inc ref_cnt free Xchg for Device reset */ + xchg = (struct unf_xchg_s *)unf_cm_get_free_xchg(lport, + UNF_XCHG_TYPE_INI); + if (unlikely(!xchg)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%p) can't get free exchange", lport); + + return FAILED; + } + + /* increase ref_cnt for protecting exchange */ + ret = (int)unf_xchg_ref_inc(xchg, INI_EH_DEVICE_RESET); + UNF_CHECK_VALID(0x1351, UNF_TRUE, (ret == RETURN_OK), return FAILED); + + /* 7. 
Send Device/LUN Reset to Low level */ + ret = unf_send_scsi_mgmt_cmnd(xchg, lport, rport, + v_scsi_cmnd, + UNF_FCP_TM_LOGICAL_UNIT_RESET); + if (unlikely(ret == FAILED)) { + /* + * Do port reset or R_Port LOGO: + * 1. FAILED: send failed + * 2. FAILED: semaphore timeout + * 3. SUCCESS: rcvd rsp & semaphore has been waken up + */ + unf_tmf_abnormal_recovery(lport, rport, xchg); + } + + /* + * 8. Release resource immediately if necessary + * NOTE: here, semaphore timeout or rcvd rsp + * (semaphore has been waken up) + */ + if (likely((lport->b_port_removing != UNF_TRUE) || + (lport->root_lport != lport))) + unf_cm_free_xchg(xchg->lport, xchg); + + /* decrease ref_cnt */ + unf_xchg_ref_dec(xchg, INI_EH_DEVICE_RESET); + + return SUCCESS; +} + +int unf_cm_target_reset_handler(struct unf_scsi_cmd_s *v_scsi_cmnd) +{ + /* SCSI Target Reset Command --->>> FC Session Reset/Delete Command */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int cmnd_result = 0; + int ret; + + UNF_CHECK_VALID(0x1355, UNF_TRUE, v_scsi_cmnd, return FAILED); + UNF_CHECK_VALID(0x1356, UNF_TRUE, v_scsi_cmnd->pc_lun_id, + return FAILED); + + /* 1. Get L_Port */ + lport = unf_find_lport_by_scsi_cmd(v_scsi_cmnd); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Can't find port by scsi_host_id(0x%x)", + UNF_GET_SCSI_HOST_ID_BY_CMND(v_scsi_cmnd)); + + return FAILED; + } + + /* 2. L_Port State check */ + if (unlikely(lport->b_port_removing == UNF_TRUE)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%p) is removing", lport); + + return FAILED; + } + + /* + * 3. Get R_Port: no rport is found or rport is not ready,return ok + * from: L_Port -->> rport_scsi_table (image table) -->> + * rport_info_table + */ + rport = unf_find_rport_by_scsi_id(lport, + v_scsi_cmnd->err_code_table, + v_scsi_cmnd->err_code_table_cout, + UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd), + &cmnd_result); + if (unlikely(!rport)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Can't find rport by scsi_id(0x%x)", + UNF_GET_SCSI_ID_BY_CMND(v_scsi_cmnd)); + + return SUCCESS; + } + + /* + * 4. set UP_ABORT on Target IO and Session IO + * + * LUN Reset: set UP_ABORT tag, with: + * INI_Busy_list, IO_Wait_list, + * IO_Delay_list, IO_Delay_transfer_list + */ + unf_cm_xchg_abort_by_session(lport, rport); + + /* 5. R_Port state check */ + if (unlikely(rport->rp_state != UNF_RPORT_ST_READY)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) RPort(0x%x) state(0x%x) is not ready, SCSI Command(0x%p)", + lport->port_id, rport->nport_id, + rport->rp_state, v_scsi_cmnd); + + return SUCCESS; + } + + /* 6. Get free Xchg for Target Reset CMND */ + xchg = (struct unf_xchg_s *)unf_cm_get_free_xchg(lport, + UNF_XCHG_TYPE_INI); + if (unlikely(!xchg)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%p) can't get free exchange", lport); + + return FAILED; + } + + /* increase ref_cnt to protect exchange */ + ret = (int)unf_xchg_ref_inc(xchg, INI_EH_DEVICE_RESET); + UNF_CHECK_VALID(0x1357, UNF_TRUE, (ret == RETURN_OK), return FAILED); + + /* 7. Send Target Reset Cmnd to low-level */ + ret = unf_send_scsi_mgmt_cmnd(xchg, lport, rport, v_scsi_cmnd, + UNF_FCP_TM_TARGET_RESET); + if (unlikely(ret == FAILED)) { + /* + * Do port reset or R_Port LOGO: + * 1. FAILED: send failed + * 2. FAILED: semaphore timeout + * 3. 
SUCCESS: rcvd rsp & semaphore has been waken up + */ + unf_tmf_abnormal_recovery(lport, rport, xchg); + } + + /* + * 8. Release resource immediately if necessary + * NOTE: here, semaphore timeout or rcvd rsp + * (semaphore has been waken up) + */ + if (likely((lport->b_port_removing != UNF_TRUE) || + (lport->root_lport != lport))) + unf_cm_free_xchg(xchg->lport, xchg); + + /* decrease exchange ref_cnt */ + unf_xchg_ref_dec(xchg, INI_EH_DEVICE_RESET); + + return SUCCESS; +} + +int unf_cm_bus_reset_handler(struct unf_scsi_cmd_s *v_scsi_cmnd) +{ + /* SCSI BUS Reset Command --->>> FC Port Reset Command */ + struct unf_lport_s *lport = NULL; + int cmnd_result = 0; + + /* 1. Get L_Port */ + lport = unf_find_lport_by_scsi_cmd(v_scsi_cmnd); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Can't find port by scsi_host_id(0x%x)", + UNF_GET_SCSI_HOST_ID_BY_CMND(v_scsi_cmnd)); + + return FAILED; + } + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_KEVENT, + "[event]Do port reset with scsi_bus_reset"); + + cmnd_result = unf_cm_reset_port(lport->port_id); + if (unlikely(cmnd_result == UNF_RETURN_ERROR)) + return FAILED; + else + return SUCCESS; +} + +void unf_process_scsi_mgmt_result(struct unf_frame_pkg_s *v_pkg, + struct unf_xchg_s *v_xchg) +{ + unsigned char *rsp_info = NULL; + unsigned char rsp_code = 0; + unsigned int code_index = 0; + + /* + * LLT found that:RSP_CODE is the third byte of FCP_RSP_INFO, + * on Little endian should be byte 0, For detail FCP_4 Table 26 + * FCP_RSP_INFO field format + * + * 1. state setting + * 2. wake up semaphore + */ + UNF_CHECK_VALID(0x1321, TRUE, v_pkg, return); + UNF_CHECK_VALID(0x1322, TRUE, v_xchg, return); + + v_xchg->tmf_state |= TMF_RESPONSE_RECEIVED; + + if (UNF_GET_LL_ERR(v_pkg) != UNF_IO_SUCCESS) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Send scsi manage command failed with error code(0x%x)", + UNF_GET_LL_ERR(v_pkg)); + + v_xchg->scsi_cmnd_info.result = UNF_IO_FAILED; + + /* wakeup semaphore & return */ + up(&v_xchg->task_sema); + + return; + } + + rsp_info = v_pkg->unf_rsp_pload_bl.buffer_ptr; + if (!rsp_info && (v_pkg->unf_rsp_pload_bl.length != 0)) { + rsp_info = + (unsigned char *) + v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu; + + /* change to little end if necessary */ + if (rsp_info && (v_pkg->byte_orders & UNF_BIT_3)) + unf_big_end_to_cpu( + rsp_info, + v_pkg->unf_rsp_pload_bl.length); + } + + if (!rsp_info) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]FCP response data pointer is NULL with Xchg TAG(0x%x)", + v_xchg->hot_pool_tag); + + v_xchg->scsi_cmnd_info.result = UNF_IO_SUCCESS; + + /* wakeup semaphore & return */ + up(&v_xchg->task_sema); + + return; + } + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]FCP response data length(0x%x), RSP_CODE(0x%x:%x:%x:%x:%x:%x:%x:%x)", + v_pkg->unf_rsp_pload_bl.length, + rsp_info[0], + rsp_info[1], + rsp_info[2], + rsp_info[3], + rsp_info[4], + rsp_info[5], + rsp_info[6], + rsp_info[7]); + + rsp_code = rsp_info[code_index]; + if ((rsp_code == UNF_FCP_TM_RSP_COMPLETE) || + (rsp_code == UNF_FCP_TM_RSP_SUCCEED)) + v_xchg->scsi_cmnd_info.result = UNF_IO_SUCCESS; + else + v_xchg->scsi_cmnd_info.result = UNF_IO_FAILED; + + /* wakeup semaphore & return */ + up(&v_xchg->task_sema); +} diff --git a/drivers/scsi/huawei/hifc/unf_io_abnormal.h b/drivers/scsi/huawei/hifc/unf_io_abnormal.h new file mode 100644 index 000000000000..54336ac71ea8 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_io_abnormal.h @@ 
-0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __UNF_IO__ABNORMAL_H__ +#define __UNF_IO__ABNORMAL_H__ + +#define UNF_GET_LL_ERR(v_pkg) (((v_pkg)->status) >> 16) + +void unf_process_scsi_mgmt_result(struct unf_frame_pkg_s *v_pkg, + struct unf_xchg_s *v_xchg); +unsigned int unf_hardware_start_io(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_pkg); + +#endif
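
The abort and task-management paths above share one synchronization pattern: the issuing thread initializes a semaphore to zero, hands the ABTS or TMF to the low-level driver, then blocks in down_timeout() until the response/marker handler calls up() (as unf_process_scsi_mgmt_result() does) or the wait expires. The stand-alone sketch below only models that flow; it is not part of the patch, the names wait_ctx, send_to_hw() and on_response() are hypothetical, and it assumes the usual <linux/semaphore.h> behaviour where down_timeout() returns 0 once up() has run and an error code on timeout.

/* Minimal model of the send-then-wait pattern used by
 * unf_send_abts_success()/unf_send_scsi_mgmt_cmnd(); all names here
 * are hypothetical.
 */
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/semaphore.h>

struct wait_ctx {
        struct semaphore sema;  /* starts at 0, released by completion path */
        int result;             /* filled in before up() */
};

static int send_to_hw(struct wait_ctx *ctx)
{
        /* hypothetical: queue ctx to hardware; pretend it always succeeds */
        return 0;
}

/* Completion/interrupt path: record the outcome and wake the waiter. */
static void on_response(struct wait_ctx *ctx, int result)
{
        ctx->result = result;
        up(&ctx->sema);
}

/* Issuing path: send the request, then wait at most timeout_ms for up(). */
static int send_and_wait(struct wait_ctx *ctx, unsigned int timeout_ms)
{
        sema_init(&ctx->sema, 0);       /* nothing to take until up() runs */

        if (send_to_hw(ctx))
                return -EIO;

        /* down_timeout() returns 0 if the response arrived, -ETIME otherwise */
        if (down_timeout(&ctx->sema, (long)msecs_to_jiffies(timeout_ms)))
                return -ETIMEDOUT;

        return ctx->result;
}

On timeout the real driver additionally clears INI_IO_STATE_UPABORT and falls back to the TMF path; that recovery step is deliberately left out of the sketch.
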
From: Chenguangli chenguangli2@huawei.com
driver inclusion category: feature bugzilla: NA
-----------------------------------------------------------------------
This module is used to register the hifc driver FC capability to the SCSI layer.
Signed-off-by: Chenguangli chenguangli2@huawei.com Reviewed-by: Zengweiliang zengweiliang.zengweiliang@huawei.com Acked-by: Hanjun Guo guohanjun@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/scsi/huawei/hifc/hifc_knl_adp.h | 48 + drivers/scsi/huawei/hifc/hifc_module.c | 104 ++ drivers/scsi/huawei/hifc/hifc_module.h | 289 +++ drivers/scsi/huawei/hifc/hifc_utils.c | 72 + drivers/scsi/huawei/hifc/hifc_utils.h | 361 ++++ drivers/scsi/huawei/hifc/unf_common.h | 1893 ++++++++++++++++++++ drivers/scsi/huawei/hifc/unf_init.c | 564 ++++++ drivers/scsi/huawei/hifc/unf_log.h | 183 ++ drivers/scsi/huawei/hifc/unf_scsi.c | 1556 ++++++++++++++++ drivers/scsi/huawei/hifc/unf_scsi_common.h | 1136 ++++++++++++ 10 files changed, 6206 insertions(+) create mode 100644 drivers/scsi/huawei/hifc/hifc_knl_adp.h create mode 100644 drivers/scsi/huawei/hifc/hifc_module.c create mode 100644 drivers/scsi/huawei/hifc/hifc_module.h create mode 100644 drivers/scsi/huawei/hifc/hifc_utils.c create mode 100644 drivers/scsi/huawei/hifc/hifc_utils.h create mode 100644 drivers/scsi/huawei/hifc/unf_common.h create mode 100644 drivers/scsi/huawei/hifc/unf_init.c create mode 100644 drivers/scsi/huawei/hifc/unf_log.h create mode 100644 drivers/scsi/huawei/hifc/unf_scsi.c create mode 100644 drivers/scsi/huawei/hifc/unf_scsi_common.h
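
The entry points added in the previous patch (unf_cm_queue_command(), unf_cm_eh_abort_handler() and the reset handlers) only become reachable once they are wired into a scsi_host_template and registered with the SCSI midlayer, which is the job of unf_scsi.c in this patch. As a rough illustration of that registration flow only: the code below is not lifted from unf_scsi.c, the hifc_* wrappers and queue limits are hypothetical, and it assumes the standard scsi_host_alloc()/scsi_add_host()/scsi_scan_host() API.

/* Illustrative registration with the SCSI midlayer; the hifc_* names
 * and limits below are hypothetical, not the values used by unf_scsi.c.
 */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

static int hifc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
        /* would build a unf_scsi_cmd_s and call unf_cm_queue_command() */
        return 0;
}

static int hifc_eh_abort(struct scsi_cmnd *cmnd)
{
        /* would call unf_cm_eh_abort_handler() */
        return SUCCESS;
}

static struct scsi_host_template hifc_sht = {
        .module            = THIS_MODULE,
        .name              = "hifc",
        .queuecommand      = hifc_queuecommand,
        .eh_abort_handler  = hifc_eh_abort,
        .can_queue         = 64,
        .this_id           = -1,
        .sg_tablesize      = 128,
        .max_sectors       = 0xffff,
};

static int hifc_register_host(struct device *dev)
{
        struct Scsi_Host *shost;
        int ret;

        shost = scsi_host_alloc(&hifc_sht, 0); /* 0: no extra per-host data */
        if (!shost)
                return -ENOMEM;

        ret = scsi_add_host(shost, dev);       /* make the host visible */
        if (ret) {
                scsi_host_put(shost);
                return ret;
        }

        scsi_scan_host(shost);                 /* probe targets/LUNs */
        return 0;
}

unf_scsi.c presumably also hooks into the FC transport class (hifc_knl_adp.h pulls in <scsi/scsi_transport_fc.h>), which this sketch omits.
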
diff --git a/drivers/scsi/huawei/hifc/hifc_knl_adp.h b/drivers/scsi/huawei/hifc/hifc_knl_adp.h new file mode 100644 index 000000000000..5b05ff6eb9bd --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_knl_adp.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef HIFC_KNL_ADP_H_ +#define HIFC_KNL_ADP_H_ + +#include <linux/types.h> +#include <linux/compiler.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/jiffies.h> +#include <linux/cpufreq.h> +#include <linux/semaphore.h> +#include <linux/sched.h> +#include <linux/kthread.h> +#include <linux/vmalloc.h> +#include <linux/version.h> +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/delay.h> +#include <linux/workqueue.h> +#include <linux/scatterlist.h> +#include <scsi/scsi.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_tcq.h> +#include <scsi/scsi_transport_fc.h> +#include <linux/sched/signal.h> + +#define __TIME_STR__ "[compiled with the kernel]" + +#define sdk_err(dev, format, ...) \ + dev_err(dev, "[COMM]"format, ##__VA_ARGS__) +#define sdk_warn(dev, format, ...) \ + dev_warn(dev, "[COMM]"format, ##__VA_ARGS__) +#define sdk_notice(dev, format, ...) \ + dev_notice(dev, "[COMM]"format, ##__VA_ARGS__) +#define sdk_info(dev, format, ...) \ + dev_info(dev, "[COMM]"format, ##__VA_ARGS__) + +#endif diff --git a/drivers/scsi/huawei/hifc/hifc_module.c b/drivers/scsi/huawei/hifc/hifc_module.c new file mode 100644 index 000000000000..127e8f9de9ea --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_module.c @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#include "hifc_module.h" + +struct unf_cm_handle_op_s hifc_cm_handle = { 0 }; +unsigned int dif_sgl_mode; +unsigned int max_speed = HIFC_SPEED_32G; +unsigned int accum_db_num = 1; +unsigned int dif_type = 0x1; +unsigned int wqe_page_size = 4096; +unsigned int wqe_pre_load = 6; +unsigned int combo_length_kb = 8; +unsigned int cos_bit_map = 0x1f; +unsigned int hifc_dif_type; +unsigned int hifc_dif_enable; +unsigned char hifc_guard; + +/* dfx counter */ +atomic64_t rx_tx_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM]; +atomic64_t rx_tx_err[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM]; +atomic64_t scq_err_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM]; +atomic64_t aeq_err_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM]; +atomic64_t dif_err_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM]; +atomic64_t mail_box_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM]; +atomic64_t up_err_event_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM]; +unsigned long long link_event_stat[HIFC_MAX_PORT_NUM][HIFC_MAX_LINK_EVENT_CNT]; +unsigned long long link_reason_stat[HIFC_MAX_PORT_NUM][HIFC_MAX_LINK_REASON_CNT]; +unsigned long long hba_stat[HIFC_MAX_PORT_NUM][HIFC_HBA_STAT_BUTT]; +atomic64_t com_up_event_err_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM]; + +static void hifc_realease_cmo_op_handle(void) +{ + memset(&hifc_cm_handle, 0, sizeof(struct unf_cm_handle_op_s)); +} + +static void hifc_check_module_para(void) +{ + if (dif_sgl_mode != 0) + dif_sgl_mode = 1; +} + +int hifc_init_module(void) +{ + int ret = RETURN_OK; + + ret = unf_common_init(); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, 
UNF_LOG_REG_ATT, UNF_ERR, + "[err]unf_common_init failed"); + + return RETURN_ERROR_S32; + } + + memset(rx_tx_stat, 0, sizeof(rx_tx_stat)); + memset(rx_tx_err, 0, sizeof(rx_tx_err)); + memset(scq_err_stat, 0, sizeof(scq_err_stat)); + memset(aeq_err_stat, 0, sizeof(aeq_err_stat)); + memset(dif_err_stat, 0, sizeof(dif_err_stat)); + memset(link_event_stat, 0, sizeof(link_event_stat)); + memset(link_reason_stat, 0, sizeof(link_reason_stat)); + memset(hba_stat, 0, sizeof(hba_stat)); + memset(&hifc_cm_handle, 0, sizeof(struct unf_cm_handle_op_s)); + memset(up_err_event_stat, 0, sizeof(up_err_event_stat)); + memset(mail_box_stat, 0, sizeof(mail_box_stat)); + memset(hifc_hba, 0, sizeof(hifc_hba)); + + spin_lock_init(&probe_spin_lock); + + /* 2. Module parameters check */ + hifc_check_module_para(); + + /* 4. Get COM Handlers used for low_level */ + if (unf_get_cm_handle_op(&hifc_cm_handle) != RETURN_OK) { + hifc_realease_cmo_op_handle(); + return RETURN_ERROR_S32; + } + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "[event]Init HIFC module succeed"); + + return ret; +} + +void hifc_exit_module(void) +{ + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[event]HIFC module removing..."); + + hifc_realease_cmo_op_handle(); + + /* 2. Unregister FC COM module(level) */ + unf_common_exit(); +} + +module_param(dif_sgl_mode, uint, 0444); +module_param(max_speed, uint, 0444); +module_param(wqe_page_size, uint, 0444); +module_param(combo_length_kb, uint, 0444); +module_param(cos_bit_map, uint, 0444); + diff --git a/drivers/scsi/huawei/hifc/hifc_module.h b/drivers/scsi/huawei/hifc/hifc_module.h new file mode 100644 index 000000000000..5ae9a4962495 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_module.h @@ -0,0 +1,289 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef __HIFC_MODULE_H__ +#define __HIFC_MODULE_H__ +#include "unf_log.h" +#include "unf_common.h" +#include "hifc_utils.h" +#include "hifc_hba.h" + +#define HIFC_SPEED_16G 0x10 +#define HIFC_SPEED_32G 0x20 +#define HIFC_MAX_PORT_NUM HIFC_MAX_PROBE_PORT_NUM +#define HIFC_TASK_TYPE_STAT_NUM 128 +#define HIFC_MAX_LINK_EVENT_CNT 4 +#define HIFC_MAX_LINK_REASON_CNT 256 + +/* Declare the global function. 
*/ +extern struct unf_cm_handle_op_s hifc_cm_handle; +extern unsigned int max_speed; +extern unsigned int accum_db_num; +extern unsigned int wqe_page_size; +extern unsigned int dif_type; +extern unsigned int wqe_pre_load; +extern unsigned int combo_length_kb; +extern unsigned int cos_bit_map; + +extern atomic64_t rx_tx_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM]; +extern atomic64_t rx_tx_err[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM]; +extern atomic64_t scq_err_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM]; +extern atomic64_t aeq_err_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM]; +extern atomic64_t dif_err_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM]; +extern atomic64_t mail_box_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM]; +extern atomic64_t com_up_event_err_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM]; +extern unsigned long long link_event_stat[HIFC_MAX_PORT_NUM][HIFC_MAX_LINK_EVENT_CNT]; +extern unsigned long long link_reason_stat[HIFC_MAX_PORT_NUM][HIFC_MAX_LINK_REASON_CNT]; +extern atomic64_t up_err_event_stat[HIFC_MAX_PORT_NUM][HIFC_TASK_TYPE_STAT_NUM]; +extern unsigned long long hba_stat[HIFC_MAX_PORT_NUM][HIFC_HBA_STAT_BUTT]; + +#define HIFC_LINK_EVENT_STAT(v_hba, link_ent) \ + (link_event_stat[(v_hba)->probe_index][link_ent]++) +#define HIFC_LINK_REASON_STAT(v_hba, link_rsn) \ + (link_reason_stat[(v_hba)->probe_index][link_rsn]++) +#define HIFC_HBA_STAT(v_hba, hba_stat_type) \ + (hba_stat[(v_hba)->probe_index][hba_stat_type]++) + +#define HIFC_UP_ERR_EVENT_STAT(v_hba, err_type) \ + (atomic64_inc(&up_err_event_stat[(v_hba)->probe_index][err_type])) +#define HIFC_UP_ERR_EVENT_STAT_READ(probe_index, io_type) \ + (atomic64_read(&up_err_event_stat[probe_index][io_type])) +#define HIFC_DIF_ERR_STAT(v_hba, dif_err) \ + (atomic64_inc(&dif_err_stat[(v_hba)->probe_index][dif_err])) +#define HIFC_DIF_ERR_STAT_READ(probe_index, dif_err) \ + (atomic64_read(&dif_err_stat[probe_index][dif_err])) + +#define HIFC_IO_STAT(v_hba, io_type) \ + (atomic64_inc(&rx_tx_stat[(v_hba)->probe_index][io_type])) +#define HIFC_IO_STAT_READ(probe_index, io_type) \ + (atomic64_read(&rx_tx_stat[probe_index][io_type])) + +#define HIFC_ERR_IO_STAT(v_hba, io_type) \ + (atomic64_inc(&rx_tx_err[(v_hba)->probe_index][io_type])) +#define HIFC_ERR_IO_STAT_READ(probe_index, io_type) \ + (atomic64_read(&rx_tx_err[probe_index][io_type])) + +#define HIFC_SCQ_ERR_TYPE_STAT(v_hba, err_type) \ + (atomic64_inc(&scq_err_stat[(v_hba)->probe_index][err_type])) +#define HIFC_SCQ_ERR_TYPE_STAT_READ(probe_index, io_type) \ + (atomic64_read(&scq_err_stat[probe_index][io_type])) +#define HIFC_AEQ_ERR_TYPE_STAT(v_hba, err_type) \ + (atomic64_inc(&aeq_err_stat[(v_hba)->probe_index][err_type])) +#define HIFC_AEQ_ERR_TYPE_STAT_READ(probe_index, io_type) \ + (atomic64_read(&aeq_err_stat[probe_index][io_type])) + +#define HIFC_MAILBOX_STAT(v_hba, io_type) \ + (atomic64_inc(&mail_box_stat[(v_hba)->probe_index][io_type])) + +#define HIFC_COM_UP_ERR_EVENT_STAT(v_hba, err_type) \ + (atomic64_inc(&com_up_event_err_stat[(v_hba)->probe_index][err_type])) +#define HIFC_COM_UP_ERR_EVENT_STAT_READ(probe_index, err_type) \ + (atomic64_read(&com_up_event_err_stat[probe_index][err_type])) + +/* + *----------------------------------------------* + * Define function * + *---------------------------------------------- + */ + +#define UNF_LOWLEVEL_ALLOC_LPORT(v_lport, fc_port, stLowLevel)\ + do {\ + if (hifc_cm_handle.pfn_unf_alloc_local_port) { \ + v_lport = \ + hifc_cm_handle.pfn_unf_alloc_local_port((fc_port), \ + (stLowLevel));\ + } 
else { \ + v_lport = NULL; \ + } \ + } while (0) + +#define UNF_LOWLEVEL_RECEIVE_ELS_PKG(v_ret, fc_port, pkg) \ + do { \ + if (hifc_cm_handle.pfn_unf_receive_els_pkg) {\ + v_ret =\ + hifc_cm_handle.pfn_unf_receive_els_pkg(\ + (fc_port), (pkg));\ + } else { \ + v_ret = UNF_RETURN_ERROR; \ + } \ + } while (0) + +#define UNF_LOWLEVEL_SEND_ELS_DONE(v_ret, fc_port, pkg) \ + do { \ + if (hifc_cm_handle.pfn_unf_send_els_done) {\ + v_ret = hifc_cm_handle.pfn_unf_send_els_done((fc_port),\ + (pkg)); \ + } else { \ + v_ret = UNF_RETURN_ERROR; \ + } \ + } while (0) + +#define UNF_LOWLEVEL_RECEIVE_GS_PKG(v_ret, fc_port, pkg)\ + do { \ + if (hifc_cm_handle.pfn_unf_receive_gs_pkg) {\ + v_ret = hifc_cm_handle.pfn_unf_receive_gs_pkg(\ + (fc_port),\ + (pkg)); \ + } else { \ + v_ret = UNF_RETURN_ERROR; \ + } \ + } while (0) + +#define UNF_LOWLEVEL_GET_CFG_PARMS(v_ret, \ + v_section_name, \ + v_cfg_parm, \ + v_cfg_value, \ + v_item_num) \ + do { \ + if (hifc_cm_handle.pfn_unf_get_cfg_parms) { \ + v_ret = (unsigned int)\ + hifc_cm_handle.pfn_unf_get_cfg_parms(\ + (v_section_name), \ + (v_cfg_parm), \ + (v_cfg_value), \ + (v_item_num)); \ + } else { \ + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT,\ + UNF_WARN,\ + "Get config parameter function is NULL.");\ + v_ret = UNF_RETURN_ERROR; \ + } \ + } while (0) + +#define UNF_LOWLEVEL_RELEASE_LOCAL_PORT(v_ret, lport) \ + do { \ + if (unlikely(!hifc_cm_handle.pfn_unf_release_local_port)) {\ + v_ret = UNF_RETURN_ERROR; \ + } else { \ + v_ret =\ + hifc_cm_handle.pfn_unf_release_local_port(\ + (lport));\ + } \ + } while (0) + +#define UNF_LOWLEVEL_TO_CM_HINICADM(v_ret, lport, pkg) \ + do { \ + if (unlikely(!hifc_cm_handle.pfn_unf_ioctl_to_com_handler)) {\ + v_ret = UNF_RETURN_ERROR; \ + } else { \ + v_ret = hifc_cm_handle.pfn_unf_ioctl_to_com_handler(\ + lport, pkg); \ + } \ + } while (0) + +#define UNF_CM_GET_SGL_ENTRY(v_ret, pkg, v_buf, v_buf_len) \ + do { \ + if (unlikely(!hifc_cm_handle.pfn_unf_cm_get_sgl_entry)) {\ + v_ret = UNF_RETURN_ERROR; \ + } else { \ + v_ret = hifc_cm_handle.pfn_unf_cm_get_sgl_entry(\ + pkg, v_buf, v_buf_len);\ + } \ + } while (0) + +#define UNF_CM_GET_DIF_SGL_ENTRY(v_ret, pkg, v_buf, v_buf_len)\ + do { \ + if (unlikely(!hifc_cm_handle.pfn_unf_cm_get_dif_sgl_entry)) {\ + v_ret = UNF_RETURN_ERROR; \ + } else { \ + v_ret = hifc_cm_handle.pfn_unf_cm_get_dif_sgl_entry(\ + pkg,\ + v_buf,\ + v_buf_len);\ + } \ + } while (0) + +#define UNF_GET_SGL_ENTRY(v_ret, pkg, v_buf, v_buf_len, v_dif_flag) \ + do { \ + if (v_dif_flag) { \ + UNF_CM_GET_DIF_SGL_ENTRY(v_ret, pkg, v_buf, v_buf_len);\ + } else { \ + UNF_CM_GET_SGL_ENTRY(v_ret, pkg, v_buf, v_buf_len);\ + } \ + } while (0) + +#define UNF_GET_FREE_ESGL_PAGE(v_ret, lport, pkg) \ + do { \ + if (unlikely(!hifc_cm_handle.pfn_unf_get_one_free_esgl_page)) {\ + v_ret = NULL; \ + } else { \ + v_ret = hifc_cm_handle.pfn_unf_get_one_free_esgl_page(\ + lport, pkg); \ + } \ + } while (0) + +#define UNF_LOWLEVEL_SCSI_COMPLETED(v_ret, lport, pkg) \ + do { \ + if (unlikely(!hifc_cm_handle.pfn_unf_receive_ini_rsponse)) {\ + v_ret = UNF_RETURN_ERROR; \ + } else { \ + v_ret = hifc_cm_handle.pfn_unf_receive_ini_rsponse(\ + lport, pkg);\ + } \ + } while (0) + +#define UNF_LOWLEVEL_PORT_EVENT(v_ret, lport, v_events, v_input)\ + do { \ + if (unlikely(!hifc_cm_handle.pfn_unf_fc_port_link_event)) {\ + v_ret = UNF_RETURN_ERROR; \ + } else { \ + v_ret = hifc_cm_handle.pfn_unf_fc_port_link_event(\ + lport, v_events, v_input);\ + } \ + } while (0) + +#define UNF_LOWLEVEL_RECEIVE_FC4LS_PKG(v_ret, fc_port, pkg)\ + do { \ + 
if (hifc_cm_handle.pfn_unf_receive_fc4_pkg) {\ + v_ret = hifc_cm_handle.pfn_unf_receive_fc4_pkg(\ + (fc_port), (pkg));\ + } else { \ + v_ret = UNF_RETURN_ERROR; \ + } \ + } while (0) + +#define UNF_LOWLEVEL_SEND_FC4LS_DONE(v_ret, lport, pkg) \ + do { \ + if (hifc_cm_handle.pfn_unf_send_fc4_done) {\ + v_ret = hifc_cm_handle.pfn_unf_send_fc4_done(\ + (lport), (pkg));\ + } else { \ + v_ret = UNF_RETURN_ERROR; \ + } \ + } while (0) + +#define UNF_LOWLEVEL_RECEIVE_BLS_PKG(v_ret, lport, pkg) \ + do { \ + if (hifc_cm_handle.pfn_unf_receive_bls_pkg) {\ + v_ret = hifc_cm_handle.pfn_unf_receive_bls_pkg(\ + (lport), (pkg)); \ + } else { \ + v_ret = UNF_RETURN_ERROR; \ + } \ + } while (0) + +#define UNF_LOWLEVEL_RECEIVE_MARKER_STS(v_ret, lport, pkg)\ + do { \ + if (hifc_cm_handle.pfn_unf_receive_marker_status) {\ + v_ret = hifc_cm_handle.pfn_unf_receive_marker_status(\ + (lport), (pkg));\ + } else { \ + v_ret = UNF_RETURN_ERROR; \ + } \ + } while (0) + +#define UNF_LOWLEVEL_RECEIVE_ABTS_MARKER_STS(v_ret, lport, pkg) \ + do { \ + if (hifc_cm_handle.pfn_unf_receive_abts_marker_status) {\ + v_ret =\ + hifc_cm_handle.pfn_unf_receive_abts_marker_status(\ + (lport), (pkg));\ + } else { \ + v_ret = UNF_RETURN_ERROR; \ + } \ + } while (0) + +#endif diff --git a/drivers/scsi/huawei/hifc/hifc_utils.c b/drivers/scsi/huawei/hifc/hifc_utils.c new file mode 100644 index 000000000000..c2c6ef1fe120 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_utils.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#include "hifc_utils.h" +#include "unf_log.h" +#include "unf_common.h" + +void hifc_cpu_to_big64(void *v_addr, unsigned int size) +{ + unsigned int index = 0; + unsigned int cnt = 0; + unsigned long long *temp = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + v_addr, dump_stack(); return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + (size % HIFC_QWORD_BYTE) == 0, dump_stack(); return); + + temp = (unsigned long long *)v_addr; + cnt = HIFC_SHIFT_TO_U64(size); + + for (index = 0; index < cnt; index++) { + *temp = cpu_to_be64(*temp); + temp++; + } +} + +void hifc_big_to_cpu64(void *v_addr, unsigned int size) +{ + unsigned int index = 0; + unsigned int cnt = 0; + unsigned long long *tmp = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + v_addr, dump_stack(); return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + (size % HIFC_QWORD_BYTE) == 0, dump_stack(); return); + + tmp = (unsigned long long *)v_addr; + cnt = HIFC_SHIFT_TO_U64(size); + + for (index = 0; index < cnt; index++) { + *tmp = be64_to_cpu(*tmp); + tmp++; + } +} + +void hifc_cpu_to_big32(void *v_addr, unsigned int size) +{ + unf_cpu_to_big_end(v_addr, size); +} + +void hifc_big_to_cpu32(void *v_addr, unsigned int size) +{ + if (size % UNF_BYTES_OF_DWORD) + dump_stack(); + unf_big_end_to_cpu(v_addr, size); +} + +unsigned int hifc_log2n(unsigned int val) +{ + unsigned int result = 0; + unsigned int logn = (val >> 1); + + while (logn) { + logn >>= 1; + result++; + } + return result; +} diff --git a/drivers/scsi/huawei/hifc/hifc_utils.h b/drivers/scsi/huawei/hifc/hifc_utils.h new file mode 100644 index 000000000000..548e5c9bb95d --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_utils.h @@ -0,0 +1,361 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef __HIFC_UTILS_H__ +#define __HIFC_UTILS_H__ + +#define UNF_ZERO 0 +#define HIFC_BIT(n) (0x1UL 
<< (n)) +#define HIFC_BIT_0 HIFC_BIT(0) +#define HIFC_BIT_1 HIFC_BIT(1) +#define HIFC_BIT_2 HIFC_BIT(2) +#define HIFC_BIT_3 HIFC_BIT(3) +#define HIFC_BIT_4 HIFC_BIT(4) +#define HIFC_BIT_5 HIFC_BIT(5) +#define HIFC_BIT_6 HIFC_BIT(6) +#define HIFC_BIT_7 HIFC_BIT(7) +#define HIFC_BIT_8 HIFC_BIT(8) +#define HIFC_BIT_9 HIFC_BIT(9) +#define HIFC_BIT_10 HIFC_BIT(10) +#define HIFC_BIT_11 HIFC_BIT(11) +#define HIFC_BIT_12 HIFC_BIT(12) +#define HIFC_BIT_13 HIFC_BIT(13) +#define HIFC_BIT_14 HIFC_BIT(14) +#define HIFC_BIT_15 HIFC_BIT(15) +#define HIFC_BIT_16 HIFC_BIT(16) +#define HIFC_BIT_17 HIFC_BIT(17) +#define HIFC_BIT_18 HIFC_BIT(18) +#define HIFC_BIT_19 HIFC_BIT(19) +#define HIFC_BIT_20 HIFC_BIT(20) +#define HIFC_BIT_21 HIFC_BIT(21) +#define HIFC_BIT_22 HIFC_BIT(22) +#define HIFC_BIT_23 HIFC_BIT(23) +#define HIFC_BIT_24 HIFC_BIT(24) +#define HIFC_BIT_25 HIFC_BIT(25) +#define HIFC_BIT_26 HIFC_BIT(26) +#define HIFC_BIT_27 HIFC_BIT(27) +#define HIFC_BIT_28 HIFC_BIT(28) +#define HIFC_BIT_29 HIFC_BIT(29) +#define HIFC_BIT_30 HIFC_BIT(30) +#define HIFC_BIT_31 HIFC_BIT(31) + +#define HIFC_GET_BITS(data, mask) ((data) & (mask)) /* Obtains the bit */ +#define HIFC_SET_BITS(data, mask) ((data) |= (mask)) /* set the bit */ +#define HIFC_CLR_BITS(data, mask) ((data) &= ~(mask)) /* clear the bit */ + +/* Byte alignment */ +#define HIFC_ALIGN_N(n) __attribute__((__packed, __aligned(n))) +#define HIFC_ALIGN_1 HIFC_ALIGN_N(1) +#define HIFC_ALIGN_2 HIFC_ALIGN_N(2) +#define HIFC_ALIGN_4 HIFC_ALIGN_N(4) +#define HIFC_ALIGN_8 HIFC_ALIGN_N(8) + +#define HIFC_ADJUST_ALIGN_4(n) ((n) - (n) % 4) + +#define HIFC_LSB(x) ((unsigned char)(x)) +#define HIFC_MSB(x) ((unsigned char)((unsigned short)(x) >> 8)) + +#define HIFC_LSW(x) ((unsigned short)(x)) +#define HIFC_MSW(x) ((unsigned short)((unsigned int)(x) >> 16)) + +#define HIFC_LSD(x) ((unsigned int)((unsigned long long)(x))) +#define HIFC_MSD(x) ((unsigned int)((((unsigned long long)(x)) >> 16) >> 16)) + +#define HIFC_BYTES_TO_QW_NUM(x) ((x) >> 3) +#define HIFC_BYTES_TO_DW_NUM(x) ((x) >> 2) + +#define UNF_GET_SHIFTMASK(__src, __shift, __mask) \ + (((__src) & (__mask)) >> (__shift)) +#define UNF_FC_SET_SHIFTMASK(__des, __val, __shift, __mask)\ + ((__des) = \ + (((__des) & ~(__mask)) | (((__val) << (__shift)) & (__mask)))) + +/* D_ID */ +#define UNF_FC_HEADER_DID_MASK 0x00FFFFFF +#define UNF_FC_HEADER_DID_SHIFT 0 +#define UNF_FC_HEADER_DID_DWORD 0 +#define UNF_GET_FC_HEADER_DID(__pfcheader)\ + UNF_GET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_DID_DWORD],\ + UNF_FC_HEADER_DID_SHIFT, UNF_FC_HEADER_DID_MASK) + +#define UNF_SET_FC_HEADER_DID(__pfcheader, __val)\ + UNF_FC_SET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_DID_DWORD],\ + __val, UNF_FC_HEADER_DID_SHIFT, UNF_FC_HEADER_DID_MASK) + +/* R_CTL */ +#define UNF_FC_HEADER_RCTL_MASK 0xFF000000 +#define UNF_FC_HEADER_RCTL_SHIFT 24 +#define UNF_FC_HEADER_RCTL_DWORD 0 +#define UNF_GET_FC_HEADER_RCTL(__pfcheader)\ + UNF_GET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_RCTL_DWORD],\ + UNF_FC_HEADER_RCTL_SHIFT, UNF_FC_HEADER_RCTL_MASK) + +#define UNF_SET_FC_HEADER_RCTL(__pfcheader, __val)\ + UNF_FC_SET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_RCTL_DWORD],\ + __val, UNF_FC_HEADER_RCTL_SHIFT, UNF_FC_HEADER_RCTL_MASK) + +/* S_ID */ +#define UNF_FC_HEADER_SID_MASK 0x00FFFFFF +#define UNF_FC_HEADER_SID_SHIFT 0 +#define UNF_FC_HEADER_SID_DWORD 1 +#define UNF_GET_FC_HEADER_SID(__pfcheader)\ + UNF_GET_SHIFTMASK(\ + ((unsigned int *)(void 
*)__pfcheader)[UNF_FC_HEADER_SID_DWORD],\ + UNF_FC_HEADER_SID_SHIFT, UNF_FC_HEADER_SID_MASK) +#define UNF_SET_FC_HEADER_SID(__pfcheader, __val)\ + UNF_FC_SET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_SID_DWORD],\ + __val, UNF_FC_HEADER_SID_SHIFT, UNF_FC_HEADER_SID_MASK) + +/* CS_CTL */ +#define UNF_FC_HEADER_CS_CTL_MASK 0xFF000000 +#define UNF_FC_HEADER_CS_CTL_SHIFT 24 +#define UNF_FC_HEADER_CS_CTL_DWORD 1 +#define UNF_GET_FC_HEADER_CS_CTL(__pfcheader)\ + UNF_GET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_CS_CTL_DWORD],\ + UNF_FC_HEADER_CS_CTL_SHIFT, UNF_FC_HEADER_CS_CTL_MASK) + +#define UNF_SET_FC_HEADER_CS_CTL(__pfcheader, __val)\ + UNF_FC_SET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_CS_CTL_DWORD],\ + __val, UNF_FC_HEADER_CS_CTL_SHIFT, UNF_FC_HEADER_CS_CTL_MASK) + +/* F_CTL */ +#define UNF_FC_HEADER_FCTL_MASK 0x00FFFFFF +#define UNF_FC_HEADER_FCTL_SHIFT 0 +#define UNF_FC_HEADER_FCTL_DWORD 2 +#define UNF_GET_FC_HEADER_FCTL(__pfcheader)\ + UNF_GET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_FCTL_DWORD],\ + UNF_FC_HEADER_FCTL_SHIFT, UNF_FC_HEADER_FCTL_MASK) +#define UNF_SET_FC_HEADER_FCTL(__pfcheader, __val)\ + UNF_FC_SET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_FCTL_DWORD],\ + __val, UNF_FC_HEADER_FCTL_SHIFT, UNF_FC_HEADER_FCTL_MASK) + +/* TYPE */ +#define UNF_FC_HEADER_TYPE_MASK 0xFF000000 +#define UNF_FC_HEADER_TYPE_SHIFT 24 +#define UNF_FC_HEADER_TYPE_DWORD 2 +#define UNF_GET_FC_HEADER_TYPE(__pfcheader)\ + UNF_GET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_TYPE_DWORD],\ + UNF_FC_HEADER_TYPE_SHIFT, UNF_FC_HEADER_TYPE_MASK) + +#define UNF_SET_FC_HEADER_TYPE(__pfcheader, __val)\ + UNF_FC_SET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_TYPE_DWORD],\ + __val, UNF_FC_HEADER_TYPE_SHIFT, UNF_FC_HEADER_TYPE_MASK) + +/* SEQ_CNT */ +#define UNF_FC_HEADER_SEQ_CNT_MASK 0x0000FFFF +#define UNF_FC_HEADER_SEQ_CNT_SHIFT 0 +#define UNF_FC_HEADER_SEQ_CNT_DWORD 3 +#define UNF_GET_FC_HEADER_SEQ_CNT(__pfcheader)\ + UNF_GET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_SEQ_CNT_DWORD],\ + UNF_FC_HEADER_SEQ_CNT_SHIFT, UNF_FC_HEADER_SEQ_CNT_MASK) + +#define UNF_SET_FC_HEADER_SEQ_CNT(__pfcheader, __val)\ + UNF_FC_SET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_SEQ_CNT_DWORD],\ + __val, UNF_FC_HEADER_SEQ_CNT_SHIFT, UNF_FC_HEADER_SEQ_CNT_MASK) + +/* DF_CTL */ +#define UNF_FC_HEADER_DF_CTL_MASK 0x00FF0000 +#define UNF_FC_HEADER_DF_CTL_SHIFT 16 +#define UNF_FC_HEADER_DF_CTL_DWORD 3 +#define UNF_GET_FC_HEADER_DF_CTL(__pfcheader)\ + UNF_GET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_DF_CTL_DWORD],\ + UNF_FC_HEADER_DF_CTL_SHIFT, UNF_FC_HEADER_DF_CTL_MASK) +#define UNF_SET_FC_HEADER_DF_CTL(__pfcheader, __val)\ + UNF_FC_SET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_DF_CTL_DWORD],\ + __val, UNF_FC_HEADER_DF_CTL_SHIFT, UNF_FC_HEADER_DF_CTL_MASK) + +/* SEQ_ID */ +#define UNF_FC_HEADER_SEQ_ID_MASK 0xFF000000 +#define UNF_FC_HEADER_SEQ_ID_SHIFT 24 +#define UNF_FC_HEADER_SEQ_ID_DWORD 3 +#define UNF_GET_FC_HEADER_SEQ_ID(__pfcheader)\ + UNF_GET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_SEQ_ID_DWORD],\ + UNF_FC_HEADER_SEQ_ID_SHIFT, UNF_FC_HEADER_SEQ_ID_MASK) +#define UNF_SET_FC_HEADER_SEQ_ID(__pfcheader, __val)\ + UNF_FC_SET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_SEQ_ID_DWORD],\ + __val, UNF_FC_HEADER_SEQ_ID_SHIFT, 
UNF_FC_HEADER_SEQ_ID_MASK) + +/* RX_ID */ +#define UNF_FC_HEADER_RXID_MASK 0x0000FFFF +#define UNF_FC_HEADER_RXID_SHIFT 0 +#define UNF_FC_HEADER_RXID_DWORD 4 +#define UNF_GET_FC_HEADER_RXID(__pfcheader)\ + UNF_GET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_RXID_DWORD],\ + UNF_FC_HEADER_RXID_SHIFT, UNF_FC_HEADER_RXID_MASK) +#define UNF_SET_FC_HEADER_RXID(__pfcheader, __val)\ + UNF_FC_SET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_RXID_DWORD],\ + __val, UNF_FC_HEADER_RXID_SHIFT, UNF_FC_HEADER_RXID_MASK) + +/* OX_ID */ +#define UNF_FC_HEADER_OXID_MASK 0xFFFF0000 +#define UNF_FC_HEADER_OXID_SHIFT 16 +#define UNF_FC_HEADER_OXID_DWORD 4 +#define UNF_GET_FC_HEADER_OXID(__pfcheader)\ + ((unsigned short)UNF_GET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_OXID_DWORD],\ + UNF_FC_HEADER_OXID_SHIFT\ + , UNF_FC_HEADER_OXID_MASK)) + +#define UNF_SET_FC_HEADER_OXID(__pfcheader, __val)\ + (UNF_FC_SET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[UNF_FC_HEADER_OXID_DWORD],\ + __val, UNF_FC_HEADER_OXID_SHIFT, UNF_FC_HEADER_OXID_MASK)) + +/* PRLI PARAM 3 */ +#define HIFC_PRLI_PARAM_WXFER_ENABLE_MASK 0x00000001 +#define HIFC_PRLI_PARAM_WXFER_ENABLE_SHIFT 0 +#define HIFC_PRLI_PARAM_WXFER_DWORD 3 +#define HIFC_GET_PRLI_PARAM_WXFER(__pfcheader)\ + UNF_GET_SHIFTMASK(\ + ((unsigned int *)(void *)(__pfcheader))[HIFC_PRLI_PARAM_WXFER_DWORD],\ + HIFC_PRLI_PARAM_WXFER_ENABLE_SHIFT, HIFC_PRLI_PARAM_WXFER_ENABLE_MASK) + +#define HIFC_PRLI_PARAM_CONF_ENABLE_MASK 0x00000080 +#define HIFC_PRLI_PARAM_CONF_ENABLE_SHIFT 7 +#define HIFC_PRLI_PARAM_CONF_DWORD 3 +#define HIFC_GET_PRLI_PARAM_CONF(__pfcheader)\ + UNF_GET_SHIFTMASK(\ + ((unsigned int *)(void *)(__pfcheader))[HIFC_PRLI_PARAM_CONF_DWORD],\ + HIFC_PRLI_PARAM_CONF_ENABLE_SHIFT, HIFC_PRLI_PARAM_CONF_ENABLE_MASK) + +#define HIFC_PRLI_PARAM_REC_ENABLE_MASK 0x00000400 +#define HIFC_PRLI_PARAM_REC_ENABLE_SHIFT 10 +#define HIFC_PRLI_PARAM_CONF_REC 3 +#define HIFC_GET_PRLI_PARAM_REC(__pfcheader)\ + UNF_GET_SHIFTMASK(\ + ((unsigned int *)(void *)(__pfcheader))[HIFC_PRLI_PARAM_CONF_REC],\ + HIFC_PRLI_PARAM_REC_ENABLE_SHIFT, HIFC_PRLI_PARAM_REC_ENABLE_MASK) + +#define HIFC_WQE_TYPE_MASK 0x000000FF +#define HIFC_WQE_TYPE_SHIFT 0 +#define HIFC_WQE_TYPE_DWORD 0 +#define HIFC_GET_WQE_TYPE_BE(__pfcheader)\ + UNF_GET_SHIFTMASK(\ + ((unsigned int *)(void *)__pfcheader)[HIFC_WQE_TYPE_DWORD],\ + HIFC_WQE_TYPE_SHIFT, HIFC_WQE_TYPE_MASK) + +#define HIFC_MAKE_64BIT_ADDR(__high32, __low32) \ + (unsigned long long)(((unsigned long long)(__high32) << 32) |\ + (unsigned long long)(__low32)) + +#define HIFC_TRACE(log_id, log_att, log_level, fmt, ...) 
\ + UNF_TRACE(log_id, log_att, log_level, fmt, ##__VA_ARGS__) + +/* Valid check */ +#define HIFC_CHECK(log_id, condition, fail_do) \ + do { \ + if (unlikely(!(condition))) { \ + HIFC_TRACE((log_id), UNF_LOG_IO_ATT, UNF_ERR, \ + "[err]Function:%s parameter check[%s] invalid",\ + __func__, #condition); \ + fail_do; \ + } \ + } while (0) + +#define PRINT_IN_MBOX(dbg_level, data, count) \ + do { \ + unsigned int index = 0; \ + if ((dbg_level) <= unf_dbg_level) { \ + printk("HIFC send inbound mailbox: "); \ + for (index = 0; index < (count) / 4; index++) { \ + printk("%08x ", \ + (((unsigned int *)(data))[index]));\ + } \ + printk("\n"); \ + } \ + } while (0) +#define PRINT_OUT_MBOX(dbg_level, data, count) \ + do { \ + unsigned int index = 0; \ + if ((dbg_level) <= unf_dbg_level) { \ + printk("HIFC receive outbound mailbox: "); \ + for (index = 0; index < (count) / 4; index++) { \ + printk("%08x ",\ + (((unsigned int *)(data))[index]));\ + } \ + printk("\n"); \ + } \ + } while (0) + +#define PRINT_INBOUND_IOB(dbg_level, data, count) \ + do { \ + unsigned int index = 0; \ + if ((dbg_level) <= unf_dbg_level) { \ + printk("HIFC send inbound iob: "); \ + for (index = 0; index < (count) / 4; index++) { \ + printk("%08x ",\ + (((unsigned int *)(data))[index]));\ + } \ + printk("\n"); \ + } \ + } while (0) + +#define PRINT_OUTBOUND_IOB(dbg_level, data, count) \ + do { \ + unsigned int index = 0; \ + if ((dbg_level) <= unf_dbg_level) { \ + printk("HIFC receive outbound iob: "); \ + for (index = 0; index < (count) / 4; index++) { \ + printk("%08x ",\ + (((unsigned int *)(data))[index]));\ + } \ + printk("\n"); \ + } \ + } while (0) +#define HIFC_REFERNCE_VAR(ref, cmp, ret) + +#define RETURN_ERROR_S32 (-1) +#define UNF_RETURN_ERROR_S32 (-1) + +enum HIFC_HBA_ERR_STAT_E { + HIFC_STAT_CTXT_FLUSH_DONE = 0, + HIFC_STAT_SQ_WAIT_EMPTY, + HIFC_STAT_LAST_GS_SCQE, + HIFC_STAT_SQ_POOL_EMPTY, + HIFC_STAT_PARENT_IO_FLUSHED, + HIFC_STAT_ROOT_IO_FLUSHED, /* 5 */ + HIFC_STAT_ROOT_SQ_FULL, + HIFC_STAT_ELS_RSP_EXCH_REUSE, + HIFC_STAT_GS_RSP_EXCH_REUSE, + HIFC_STAT_SQ_IO_BUFFER_CLEARED, + HIFC_STAT_PARENT_SQ_NOT_OFFLOADED, /* 10 */ + HIFC_STAT_PARENT_SQ_QUEUE_DELAYED_WORK, + HIFC_STAT_PARENT_SQ_INVALID_CACHED_ID, + HIFC_HBA_STAT_BUTT +}; + +#define HIFC_DWORD_BYTE 4 +#define HIFC_QWORD_BYTE 8 +#define HIFC_SHIFT_TO_U64(x) ((x) >> 3) +#define HIFC_SHIFT_TO_U32(x) ((x) >> 2) + +void hifc_cpu_to_big64(void *v_addr, unsigned int size); +void hifc_big_to_cpu64(void *v_addr, unsigned int size); +void hifc_cpu_to_big32(void *v_addr, unsigned int size); +void hifc_big_to_cpu32(void *v_addr, unsigned int size); +unsigned int hifc_log2n(unsigned int val); + +#endif /* __HIFC_UTILS_H__ */ + diff --git a/drivers/scsi/huawei/hifc/unf_common.h b/drivers/scsi/huawei/hifc/unf_common.h new file mode 100644 index 000000000000..7f5161676ebb --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_common.h @@ -0,0 +1,1893 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __UNF_COMMON_H +#define __UNF_COMMON_H + +#include "unf_scsi_common.h" + +/* V/C version number */ +#define UNF_MAJOR_VERSION "3" +/* B version, B0XX Corresponding x.x */ +#define UNF_B_VERSION "5.0" +/* Indicates the minor version number of the driver */ +#define UNF_DRIVER_VERSION "8" +/* version num */ +#define UNF_FC_VERSION UNF_MAJOR_VERSION "." UNF_B_VERSION "." 
UNF_DRIVER_VERSION +extern unsigned int unf_dbg_level; +extern unsigned int hifc_dif_type; +extern unsigned int hifc_dif_enable; +extern unsigned char hifc_guard; + +#define RETURN_ERROR_S32 (-1) +#define UNF_RETURN_ERROR_S32 (-1) + +#define UNF_IO_SUCCESS 0x00000000 +/* the host system aborted the command */ +#define UNF_IO_ABORTED 0x00000001 +#define UNF_IO_FAILED 0x00000002 +#define UNF_IO_ABORT_ABTS 0x00000003 +#define UNF_IO_ABORT_LOGIN 0x00000004 /* abort login */ +/* reset event aborted the transport */ +#define UNF_IO_ABORT_REET 0x00000005 +#define UNF_IO_ABORT_FAILED 0x00000006 /* abort failed */ +/* data out of order ,data reassembly error */ +#define UNF_IO_OUTOF_ORDER 0x00000007 +#define UNF_IO_FTO 0x00000008 /* frame time out */ +#define UNF_IO_LINK_FAILURE 0x00000009 +#define UNF_IO_OVER_FLOW 0x0000000a /* data over run */ +#define UNF_IO_RSP_OVER 0x0000000b +#define UNF_IO_LOST_FRAME 0x0000000c +#define UNF_IO_UNDER_FLOW 0x0000000d /* data under run */ +#define UNF_IO_HOST_PROG_ERROR 0x0000000e +#define UNF_IO_SEST_PROG_ERROR 0x0000000f +#define UNF_IO_INVALID_ENTRY 0x00000010 +#define UNF_IO_ABORT_SEQ_NOT 0x00000011 +#define UNF_IO_REJECT 0x00000012 +#define UNF_IO_RS_INFO 0x00000013 +#define UNF_IO_EDC_IN_ERROR 0x00000014 +#define UNF_IO_EDC_OUT_ERROR 0x00000015 +#define UNF_IO_UNINIT_KEK_ERR 0x00000016 +#define UNF_IO_DEK_OUTOF_RANGE 0x00000017 +#define UNF_IO_KEY_UNWRAP_ERR 0x00000018 +#define UNF_IO_KEY_TAG_ERR 0x00000019 +#define UNF_IO_KEY_ECC_ERR 0x0000001a +#define UNF_IO_BLOCK_SIZE_ERROR 0x0000001b +#define UNF_IO_ILLEGAL_CIPHER_MODE 0x0000001c +#define UNF_IO_CLEAN_UP 0x0000001d +#define UNF_SRR_RECEIVE 0x0000001e /* receive srr */ +/* The target device sent an ABTS to abort the I/O. */ +#define UNF_IO_ABORTED_BY_TARGET 0x0000001f +#define UNF_IO_TRANSPORT_ERROR 0x00000020 +#define UNF_IO_LINK_FLASH 0x00000021 +#define UNF_IO_TIMEOUT 0x00000022 +#define UNF_IO_PORT_UNAVAILABLE 0x00000023 +#define UNF_IO_PORT_LOGOUT 0x00000024 +#define UNF_IO_PORT_CFG_CHG 0x00000025 +#define UNF_IO_FIRMWARE_RES_UNAVAILABLE 0x00000026 +#define UNF_IO_TASK_MGT_OVERRUN 0x00000027 +#define UNF_IO_DMA_ERROR 0x00000028 +#define UNF_IO_DIF_ERROR 0x00000029 +#define UNF_IO_NO_LPORT 0x0000002a +#define UNF_IO_NO_XCHG 0x0000002b +#define UNF_IO_SOFT_ERR 0x0000002c +#define UNF_IO_XCHG_ADD_ERROR 0x0000002d +#define UNF_IO_NO_LOGIN 0x0000002e +#define UNF_IO_NO_BUFFER 0x0000002f +#define UNF_IO_DID_ERROR 0x00000030 +#define UNF_IO_UNSUPPORT 0x00000031 +#define UNF_IO_NOREADY 0x00000032 +#define UNF_IO_NPORTID_REUSED 0x00000033 +#define UNF_IO_NPORT_HANDLE_REUSED 0x00000034 +#define UNF_IO_NO_NPORT_HANDLE 0x00000035 +#define UNF_IO_ABORT_BY_FW 0x00000036 +#define UNF_IO_ABORT_PORT_REMOVING 0x00000037 +#define UNF_IO_INCOMPLETE 0x00000038 +#define UNF_IO_DIF_REF_ERROR 0x00000039 +#define UNF_IO_DIF_GEN_ERROR 0x0000003a + +#define UNF_IO_ERREND 0xFFFFFFFF + +/* define bits */ +#define UNF_BIT(n) (0x1UL << (n)) +#define UNF_BIT_0 UNF_BIT(0) +#define UNF_BIT_1 UNF_BIT(1) +#define UNF_BIT_2 UNF_BIT(2) +#define UNF_BIT_3 UNF_BIT(3) +#define UNF_BIT_4 UNF_BIT(4) +#define UNF_BIT_5 UNF_BIT(5) + +struct buff_list_s { + u8 *vaddr; + dma_addr_t paddr; +}; + +struct buf_describe_s { + struct buff_list_s *buflist; + u32 buf_size; + u32 buf_num; +}; + +#define BUF_LIST_PAGE_SIZE (PAGE_SIZE << 8) + +/* Echo macro define */ +#define ECHO_MG_VERSION_LOCAL 1 +#define ECHO_MG_VERSION_REMOTE 2 + +/* save hba info macro define */ +#define SAVE_PORT_INFO_LEN 1016 + +#define UNF_GET_NAME_HIGH_WORD(v_name) \ + 
(((v_name) >> 32) & 0xffffffff) +#define UNF_GET_NAME_LOW_WORD(v_name) \ + ((v_name) & 0xffffffff) + +#define UNF_FIRST_LPORT_ID_MASK 0xffffff00 +#define HIFC_MAX_COUNTER_TYPE 128 + +#define UNF_EVENT_ASYN 0 +#define UNF_EVENT_SYN 1 +#define UNF_GLOBAL_EVENT_ASYN 2 +#define UNF_GLOBAL_EVENT_SYN 3 + +/* define sfp err */ +#define UNF_SFP_PRESENT_FAIL 0x1 +#define UNF_SFP_POWER_FAIL 0x2 +#define UNF_9545_FAIL 0x3 + +/* obtain the values of board type and ID */ +#define UNF_GET_BOARD_TYPE_AND_SLOT_ID_BY_PORTID(port_id) \ + (((port_id) & 0x00FF00) >> 8) + +#define UNF_FC_SERVER_BOARD_8_G 13 /* 8G mode */ +#define UNF_FC_SERVER_BOARD_16_G 7 /* 16G mode */ +#define UNF_FC_SERVER_BOARD_32_G 6 /* 32G mode */ + +#define UNF_PORT_TYPE_FC_QSFP 1 +#define UNF_PORT_TYPE_FC_SFP 0 +#define UNF_PORT_UNGRADE_FW_RESET_ACTIVE 0 +#define UNF_PORT_UNGRADE_FW_RESET_INACTIVE 1 + +#ifndef __BIG_ENDIAN__ +#define __BIG_ENDIAN__ 0x4321 +#endif + +#ifndef __LITTLE_ENDIAN__ +#define __LITTLE_ENDIAN__ 0x1234 +#endif + +#ifdef __BYTE_ORDER__ +#undef __BYTE_ORDER__ +#endif +#define __BYTE_ORDER__ __LITTLE_ENDIAN__ + +#ifndef INVALID_VALUE64 +#define INVALID_VALUE64 0xFFFFFFFFFFFFFFFFULL +#endif /* INVALID_VALUE64 */ + +#ifndef INVALID_VALUE32 +#define INVALID_VALUE32 0xFFFFFFFF +#endif /* INVALID_VALUE32 */ + +#ifndef INVALID_VALUE16 +#define INVALID_VALUE16 0xFFFF +#endif /* INVALID_VALUE16 */ + +#ifndef INVALID_VALUE8 +#define INVALID_VALUE8 0xFF +#endif /* INVALID_VALUE8 */ + +#ifndef RETURN_OK +#define RETURN_OK 0 +#endif + +#ifndef RETURN_ERROR +#define RETURN_ERROR (~0) +#endif +#define UNF_RETURN_ERROR (~0) + +#ifndef UNF_RETURN_NOT_SUPPORT +#define UNF_RETURN_NOT_SUPPORT (2) +#endif + +enum int_e { + UNF_FALSE = 0, + UNF_TRUE = 1 +}; + +#define DRV_DIF_CRC_ERR 0x1001 +#define DRV_DIF_LBA_ERR 0x1002 +#define DRV_DIF_APP_ERR 0x1003 + +#define UNF_SCSI_SENSE_DATA_LEN SCSI_SENSE_DATA_LEN + +/* RPort Management information related to Rport, + * only used at the boundary between common and lowlevel + */ +struct unf_rport_info_s { + unsigned int local_nport_id; + unsigned int nport_id; + unsigned int rport_index; + unsigned long long port_name; + unsigned char rsvd0[3]; +}; + +struct unf_cfg_item_s { + char *name; + unsigned int min_value; + unsigned int default_value; + unsigned int max_value; +}; + +struct unf_port_params_s { + unsigned int ra_tov; + unsigned int ed_tov; +}; + +/* get wwpn adn wwnn */ +struct unf_get_chip_info_argout { + unsigned char board_type; + unsigned long long wwpn; + unsigned long long wwnn; + unsigned long long sys_mac; +}; + +/* get sfp info: present and speed */ +struct unf_get_port_info_argout { + unsigned char sfp_speed; + unsigned char present; + unsigned char rsvd[2]; +}; + +/* SFF-8436(QSFP+) Rev 4.7 */ +struct sfp_plus_field_a0_s { + unsigned char identifier; + /* offset 1~2 */ + struct { + unsigned char reserved; + unsigned char status; + } status_indicator; + /* offset 3~21 */ + struct { + unsigned char rx_tx_los; + unsigned char tx_fault; + unsigned char all_resv; + + unsigned char ini_complete : 1; + unsigned char bit_resv : 3; + unsigned char temp_low_warn : 1; + unsigned char temp_high_warn : 1; + unsigned char temp_low_alarm : 1; + unsigned char temp_high_alarm : 1; + + unsigned char resv : 4; + unsigned char vcc_low_warn : 1; + unsigned char vcc_high_warn : 1; + unsigned char vcc_low_alarm : 1; + unsigned char vcc_high_alarm : 1; + + unsigned char resv8; + unsigned char rx_pow[2]; + unsigned char tx_bias[2]; + unsigned char reserved[6]; + unsigned char vendor_specifics[3]; + 
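+ /* Descriptive note (assumption, not from the original source): the flag
+ * bytes above appear to follow the SFF-8436 lower-page interrupt-flag
+ * layout (offsets 3~21), with rx_pow[]/tx_bias[] carrying the per-channel
+ * Rx-power and Tx-bias alarm/warning flag bytes.
+ */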
} interrupt_flag; + /* offset 22~33 */ + struct { + unsigned char temp[2]; + unsigned char reserved[2]; + unsigned char supply_vol[2]; + unsigned char reserveds[2]; + unsigned char vendor_specific[4]; + } module_monitors; + /* offset 34~81 */ + struct { + unsigned char rx_pow[8]; + unsigned char tx_bias[8]; + unsigned char reserved[16]; + unsigned char vendor_specific[16]; + } channel_monitor_val; + + /* offset 82~85 */ + unsigned char reserved[4]; + + /* offset 86~97 */ + struct { + /* 86~88 */ + unsigned char tx_disable; + unsigned char rx_rate_select; + unsigned char tx_rate_select; + + /* 89~92 */ + unsigned char rx_4_app_select; + unsigned char rx_3_app_select; + unsigned char rx_2_app_select; + unsigned char rx_1_app_select; + /* 93 */ + unsigned char power_override : 1; + unsigned char power_set : 1; + unsigned char reserved : 6; + + /* 94~97 */ + unsigned char tx_4_app_select; + unsigned char tx_3_app_select; + unsigned char tx_2_app_select; + unsigned char tx_1_app_select; + /* 98~99 */ + unsigned char auc_reserved[2]; + } control; + /* 100~106 */ + struct { + /* 100 */ + unsigned char mrx_1_os : 1; + unsigned char mrx_2_los : 1; + unsigned char mrx_3_los : 1; + unsigned char mrx_4_los : 1; + unsigned char mtx_1_los : 1; + unsigned char mtx_2_los : 1; + unsigned char mtx_3_los : 1; + unsigned char mtx_4_los : 1; + /* 101 */ + unsigned char mtx_1_fault : 1; + unsigned char mtx_2_fault : 1; + unsigned char mtx_3_fault : 1; + unsigned char mtx_4_fault : 1; + unsigned char reserved : 4; + /* 102 */ + unsigned char uc_reserved; + /* 103 */ + unsigned char mini_cmp_flag : 1; + unsigned char rsv : 3; + unsigned char mtemp_low_warn : 1; + unsigned char mtemp_high_warn : 1; + unsigned char mtemp_low_alarm : 1; + unsigned char mtemp_high_alarm : 1; + /* 104 */ + unsigned char rsv1 : 4; + unsigned char mvcc_low_warn : 1; + unsigned char mvcc_high_warn : 1; + unsigned char mvcc_low_alarm : 1; + unsigned char mvcc_high_alarm : 1; + /* 105~106 */ + unsigned char vendor_specific[2]; + } module_channel_mask_bit; + /* 107~118 */ + unsigned char auc_resv[12]; + /* 119~126 */ + unsigned char auc_reserved[8]; + /* 127 */ + unsigned char page_select; +}; + +/* page 00 */ +struct sfp_plus_field_00_s { + /* 128~191 */ + struct { + unsigned char id; + unsigned char id_ext; + unsigned char connector; + unsigned char speci_com[6]; + unsigned char mode; + unsigned char speed; + unsigned char encoding; + unsigned char br_nominal; + unsigned char ext_rate_select_com; + unsigned char length_smf; + unsigned char length_om3; + unsigned char length_om2; + unsigned char length_om1; + unsigned char length_copper; + unsigned char device_tech; + unsigned char vendor_name[16]; + unsigned char ex_module; + unsigned char vendor_oui[3]; + unsigned char vendor_pn[16]; + unsigned char vendor_rev[2]; + /* Wave length or Copper cable Attenuation */ + unsigned char wave_or_copper_attenuation[2]; + unsigned char wave_length_toler[2]; /* Wavelength tolerance */ + unsigned char max_temp; + unsigned char cc_base; + } base_id_fields; + /* 192~223 */ + struct { + unsigned char options[4]; + unsigned char vendor_sn[16]; + unsigned char date_code[8]; + unsigned char diagn_monit_type; + unsigned char enhance_opt; + unsigned char uc_reserved; + unsigned char ccext; + } ext_id_fields; + /* 224~255 */ + unsigned char vendor_spec_eeprom[32]; +}; + +/* page 01 */ +struct sfp_field_01_s { + unsigned char optiona_l01[128]; +}; + +/* page 02 */ +struct sfp_field_02_s { + unsigned char optiona_l02[128]; +}; + +/* page 03 */ +struct 
sfp_field_03_s { + unsigned char temp_high_alarm[2]; + unsigned char temp_low_alarm[2]; + unsigned char temp_high_warn[2]; + unsigned char temp_low_warn[2]; + + unsigned char reserved1[8]; + + unsigned char vcc_high_alarm[2]; + unsigned char vcc_low_alarm[2]; + unsigned char vcc_high_warn[2]; + unsigned char vcc_low_warn[2]; + + unsigned char reserved2[8]; + unsigned char vendor_specific1[16]; + + unsigned char pow_high_alarm[2]; + unsigned char pow_low_alarm[2]; + unsigned char pow_high_warn[2]; + unsigned char pow_low_warn[2]; + + unsigned char bias_high_alarm[2]; + unsigned char bias_low_alarm[2]; + unsigned char bias_high_warn[2]; + unsigned char bias_low_warn[2]; + + unsigned char tx_power_high_alarm[2]; + unsigned char tx_power_low_alarm[2]; + unsigned char reserved3[4]; + + unsigned char reserved4[8]; + + unsigned char vendor_specific2[16]; + unsigned char reserved5[2]; + unsigned char vendor_specific3[12]; + unsigned char rx_ampl[2]; + unsigned char rx_tx_sq_disable; + unsigned char rx_output_disable; + unsigned char chan_monit_mask[12]; + unsigned char reserved6[2]; + +}; + +struct sfp_plus_info_s { + struct sfp_plus_field_a0_s sfp_plus_info_a0; + struct sfp_plus_field_00_s sfp_plus_info_00; + struct sfp_field_01_s sfp_plus_info_01; + struct sfp_field_02_s sfp_plus_info_02; + struct sfp_field_03_s sfp_plus_info_03; +}; + +/* SFF-8472 Rev 10.4 */ +struct unf_sfp_data_field_a0_s { + /* Offset 0~63 */ + struct { + unsigned char id; + unsigned char id_ext; + unsigned char connector; + unsigned char atransceiver[8]; + unsigned char encoding; + /* Nominal signalling rate, units of 100MBd. */ + unsigned char br_nominal; + /* Type of rate select functionality */ + unsigned char rate_identifier; + /* Link length supported for single mode fiber, units of km */ + unsigned char length_smf_km; + /* Link length supported for single mode fiber, + * units of 100 m + */ + unsigned char length_smf; + /* Link length supported for 50 um OM2 fiber, units of 10 m */ + unsigned char length_smf_om2; + /* Link length supported for 62.5 um OM1 fiber, units of 10 m */ + unsigned char length_smf_om1; + /* Link length supported for copper or direct attach cable, + * units of m + */ + unsigned char length_cable; + /* Link length supported for 50 um OM3 fiber, units of 10 m */ + unsigned char length_om3; + unsigned char vendor_name[16]; /* ASCII */ + /* Code for electronic or optical compatibility */ + unsigned char transceiver; + unsigned char vendor_oui[3]; /* SFP vendor IEEE company ID */ + /* Part number provided by SFP vendor (ASCII) */ + unsigned char vendor_pn[16]; + /* Revision level for part number provided by vendor (ASCII) */ + unsigned char vendor_rev[4]; + /* Laser wavelength (Passive/Active Cable + * Specification Compliance) + */ + unsigned char wave_length[2]; + unsigned char unallocated; + /* Check code for Base ID Fields (addresses 0 to 62) */ + unsigned char cc_base; + } base_id_fields; + + /* Offset 64~95 */ + struct { + unsigned char options[2]; + unsigned char br_max; + unsigned char br_min; + unsigned char vendor_sn[16]; + unsigned char date_code[8]; + unsigned char diag_monitoring_type; + unsigned char enhanced_options; + unsigned char sff8472_compliance; + unsigned char cc_ext; + } ext_id_fields; + + /* Offset 96~255 */ + struct { + unsigned char vendor_spec_eeprom[32]; + unsigned char rsvd[128]; + } vendor_spec_id_fields; +}; + +struct unf_sfp_data_field_a2_s { + /* Offset 0~119 */ + struct { + /* 0~39 */ + struct { + unsigned char temp_alarm_high[2]; + unsigned char temp_alarm_low[2]; 
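+ /* Descriptive note (assumption based on the SFF-8472 reference above):
+ * each [2] threshold pair in this block is a single 16-bit value stored
+ * MSB first, in the same units as the corresponding live diag readings.
+ */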
+ unsigned char temp_warning_high[2]; + unsigned char temp_warning_low[2]; + + unsigned char vcc_alarm_high[2]; + unsigned char vcc_alarm_low[2]; + unsigned char vcc_warning_high[2]; + unsigned char vcc_warning_low[2]; + + unsigned char bias_alarm_high[2]; + unsigned char bias_alarm_low[2]; + unsigned char bias_warning_high[2]; + unsigned char bias_warning_low[2]; + + unsigned char tx_alarm_high[2]; + unsigned char tx_alarm_low[2]; + unsigned char tx_warning_high[2]; + unsigned char tx_warning_low[2]; + + unsigned char rx_alarm_high[2]; + unsigned char rx_alarm_low[2]; + unsigned char rx_warning_high[2]; + unsigned char rx_warning_low[2]; + } alarm_warn_th; + + unsigned char unallocated0[16]; + unsigned char ext_cal_constants[36]; + unsigned char unallocated1[3]; + unsigned char cc_dmi; + + /* 96~105 */ + struct { + unsigned char temp[2]; + unsigned char vcc[2]; + unsigned char tx_bias[2]; + unsigned char tx_power[2]; + unsigned char rx_power[2]; + } diag; + + unsigned char unallocated2[4]; + + struct { + unsigned char data_rdy_bar_state : 1; + unsigned char rx_los : 1; + unsigned char tx_fault_state : 1; + unsigned char soft_rate_select_state : 1; + unsigned char rate_select_state : 1; + unsigned char rs_state : 1; + unsigned char soft_tx_disable_select : 1; + unsigned char tx_disable_state : 1; + } status_ctrl; + unsigned char rsvd; + + /* 112~113 */ + struct { + /* 112 */ + unsigned char tx_alarm_low : 1; + unsigned char tx_alarm_high : 1; + unsigned char tx_bias_alarm_low : 1; + unsigned char tx_bias_alarm_high : 1; + unsigned char vcc_alarm_low : 1; + unsigned char vcc_alarm_high : 1; + unsigned char temp_alarm_low : 1; + unsigned char temp_alarm_high : 1; + + /* 113 */ + unsigned char rsvd : 6; + unsigned char rx_alarm_low : 1; + unsigned char rx_alarm_high : 1; + } alarm; + + unsigned char unallocated3[2]; + + /* 116~117 */ + struct { + /* 116 */ + unsigned char tx_warn_lo : 1; + unsigned char tx_warn_hi : 1; + unsigned char bias_warn_lo : 1; + unsigned char bias_warn_hi : 1; + unsigned char vcc_warn_lo : 1; + unsigned char vcc_warn_hi : 1; + unsigned char temp_warn_lo : 1; + unsigned char temp_warn_hi : 1; + + /* 117 */ + unsigned char rsvd : 6; + unsigned char rx_warn_lo : 1; + unsigned char rx_warn_hi : 1; + } warning; + + unsigned char ext_status_and_ctrl[2]; + } diag; + + /* Offset 120~255 */ + struct { + unsigned char vendor_spec[8]; + unsigned char user_eeprom[120]; + unsigned char vendor_ctrl[8]; + } general_use_fields; +}; + +struct unf_sfp_info_s { + struct unf_sfp_data_field_a0_s sfp_info_a0; + struct unf_sfp_data_field_a2_s sfp_info_a2; +}; + +union unf_sfp_eeprome_info { + struct unf_sfp_info_s sfp_info; + struct sfp_plus_info_s sfp_plus_info; +}; + +/* sfp info end */ +struct unf_lport_sfp_info { + unsigned int status; + union unf_sfp_eeprome_info sfp_eeprom_info; +}; + +struct unf_err_code_s { + unsigned int loss_of_signal_count; + unsigned int bad_rx_char_count; + unsigned int loss_of_sync_count; + unsigned int link_fail_count; + unsigned int rx_eo_fa_count; + unsigned int dis_frame_count; + unsigned int bad_crc_count; + unsigned int proto_error_count; +}; + +/* config file */ +enum unf_scsi_mode_e { + UNF_PORT_MODE_UNKNOWN = 0x00, + UNF_PORT_MODE_TGT = 0x10, + UNF_PORT_MODE_INI = 0x20, + UNF_PORT_MODE_BOTH = 0x30 +}; + +enum unf_port_upgrade_e { + UNF_PORT_UNSUPPORT_UPGRADE_REPORT = 0x00, + UNF_PORT_SUPPORT_UPGRADE_REPORT = 0x01, + UNF_PORT_UPGRADE_BUTT +}; + +#define UNF_BYTES_OF_DWORD 0x4 +static inline void __attribute__((unused)) unf_big_end_to_cpu( + unsigned 
char *v_buffer, unsigned int v_size) +{ + unsigned int *buffer = NULL; + unsigned int word_sum = 0; + unsigned int i = 0; + + if (!v_buffer) + return; + + buffer = (unsigned int *)v_buffer; + + /* byte to word */ + if (v_size % UNF_BYTES_OF_DWORD == 0) + word_sum = v_size / UNF_BYTES_OF_DWORD; + else + return; + + /* word to byte */ + while (i < word_sum) { + *buffer = be32_to_cpu(*buffer); + buffer++; + i++; + } +} + +static inline void __attribute__((unused)) unf_cpu_to_big_end( + void *v_buffer, unsigned int v_size) +{ +#define DWORD_BIT 32 +#define BYTE_BIT 8 + unsigned int *buffer = NULL; + unsigned int word_sum = 0; + unsigned int i = 0; + unsigned int tmp = 0; + + if (!v_buffer) + return; + + buffer = (unsigned int *)v_buffer; + + /* byte to dword */ + word_sum = v_size / 4; + + /* dword to byte */ + while (i < word_sum) { + *buffer = cpu_to_be32(*buffer); + buffer++; + i++; + } + + if (v_size % 4) { + tmp = cpu_to_be32(*buffer); + tmp = tmp >> (DWORD_BIT - (v_size % 4) * BYTE_BIT); + memcpy(buffer, &tmp, (v_size % 4)); + } +} + +#define UNF_FUNCTION_RETURN_CHECK(ret, dstlen) \ + do { \ + if (((ret) <= 0) || ((ret) >= (dstlen))) { \ + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, \ + UNF_LOG_REG_ATT, UNF_ERR, \ + "function return (%d) check invalid, dst len(%d).", \ + (ret), (dstlen)); \ + } \ + } while (0) + +#define UNF_TOP_AUTO_MASK 0x0f + +#define UNF_NORMAL_MODE 0 +#define UNF_SET_NOMAL_MODE(mode) (mode = UNF_NORMAL_MODE) + +/* + * SCSI status + */ +#define SCSI_CHECK_CONDITION 0x02 + +enum unf_act_topo_e { + UNF_ACT_TOP_PUBLIC_LOOP = 0x1, + UNF_ACT_TOP_PRIVATE_LOOP = 0x2, + UNF_ACT_TOP_P2P_DIRECT = 0x4, + UNF_ACT_TOP_P2P_FABRIC = 0x8, + UNF_TOP_LOOP_MASK = 0x03, + UNF_TOP_P2P_MASK = 0x0c, + UNF_TOP_FCOE_MASK = 0x30, + UNF_ACT_TOP_UNKNOWN +}; + +#define UNF_FL_PORT_LOOP_ADDR 0x00 + +#define UNF_FC_PROTOCOL_TYPE 0x100 + +#define UNF_LOOP_ROLE_MASTER_OR_SLAVE 0x0 + +#define UNF_TOU16_CHECK(dest, src, over_action) \ + do { \ + if (unlikely((src) > 0xFFFF)) { \ + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, \ + UNF_ERR, "ToU16 error, src 0x%x ", (src)); \ + over_action; \ + } \ + ((dest) = (unsigned short)(src)); \ + } while (0) + +#define UNF_PORT_SPEED_AUTO 0 +#define UNF_PORT_SPEED_2_G 2 +#define UNF_PORT_SPEED_4_G 4 +#define UNF_PORT_SPEED_8_G 8 +#define UNF_PORT_SPEED_10_G 10 +#define UNF_PORT_SPEED_16_G 16 +#define UNF_PORT_SPEED_32_G 32 + +#define UNF_PORT_SPEED_UNKNOWN (~0) +#define UNF_PORT_SFP_SPEED_ERR 0xFF + +#define UNF_FW_VERSION_LEN 32 +#define UNF_HW_VERSION_LEN 32 + +/* max frame size */ +#define UNF_MAX_FRAME_SIZE 2112 + +/* default */ +#define UNF_DEFAULT_FRAME_SIZE 2048 +#define UNF_DEFAULT_EDTOV 2000 +#define UNF_DEFAULT_RATOV 10000 +#define UNF_DEFAULT_FABRIC_RATOV 10000 +#define UNF_MAX_RETRY_COUNT 3 +#define UNF_DEFAULT_RRTOV (10000 + 500) /* FCP-4 10.4.10 */ +#define UNF_RRQ_MIN_TIMEOUT_INTERVAL 30000 +#define UNF_LOGO_TIMEOUT_INTERVAL 3000 +#define UNF_WRITE_RRQ_SENDERR_INTERVAL 3000 +#define UNF_REC_TOV 3000 + +#define UNF_WAIT_SEM_TIMEOUT (5000UL) +#define UNF_WAIT_ABTS_RSP_TIMEOUT (20000UL) + +#define UNF_INI_RRQ_REDUNDANT_TIME 500 +#define UNF_INI_ELS_REDUNDANT_TIME 2000 + +/* ELS command values */ +#define UNF_ELS_CMND_HIGH_MASK 0xff000000 +#define UNF_ELS_CMND_RJT 0x01000000 +#define UNF_ELS_CMND_ACC 0x02000000 +#define UNF_ELS_CMND_PLOGI 0x03000000 +#define UNF_ELS_CMND_FLOGI 0x04000000 +#define UNF_ELS_CMND_LOGO 0x05000000 +#define UNF_ELS_CMND_RLS 0x0F000000 +#define UNF_ELS_CMND_ECHO 0x10000000 +#define UNF_ELS_CMND_REC 0x13000000 +#define 
UNF_ELS_CMND_RRQ 0x12000000 +#define UNF_ELS_CMND_PRLI 0x20000000 +#define UNF_ELS_CMND_PRLO 0x21000000 +#define UNF_ELS_CMND_PDISC 0x50000000 +#define UNF_ELS_CMND_FDISC 0x51000000 +#define UNF_ELS_CMND_ADISC 0x52000000 +#define UNF_ELS_CMND_FAN 0x60000000 +#define UNF_ELS_CMND_RSCN 0x61000000 +#define UNF_FCP_CMND_SRR 0x14000000 +#define UNF_GS_CMND_SCR 0x62000000 + +#define UNF_PLOGI_VERSION_UPPER 0x20 +#define UNF_PLOGI_VERSION_LOWER 0x20 +#define UNF_PLOGI_CONCURRENT_SEQ 0x00FF +#define UNF_PLOGI_RO_CATEGORY 0x00FE +#define UNF_PLOGI_SEQ_PER_XCHG 0x0001 + +/* CT_IU pream defines */ +#define UNF_REV_NPORTID_INIT 0x01000000 +#define UNF_FSTYPE_OPT_INIT 0xfc020000 +#define UNF_FSTYPE_RFT_ID 0x02170000 +#define UNF_FSTYPE_GID_PT 0x01A10000 +#define UNF_FSTYPE_GID_FT 0x01710000 +#define UNF_FSTYPE_RFF_ID 0x021F0000 +#define UNF_FSTYPE_GFF_ID 0x011F0000 +#define UNF_FSTYPE_GNN_ID 0x01130000 +#define UNF_FSTYPE_GPN_ID 0x01120000 + +#define UNF_CT_IU_RSP_MASK 0xffff0000 +#define UNF_CT_IU_REASON_MASK 0x00ff0000 +#define UNF_CT_IU_EXPLAN_MASK 0x0000ff00 +#define UNF_CT_IU_REJECT 0x80010000 +#define UNF_CT_IU_ACCEPT 0x80020000 + +#define UNF_FABRIC_FULL_REG 0x00000003 + +#define UNF_FC4_SCSI_BIT8 0x00000100 +#define UNF_FC4_FCP_TYPE 0x00000008 +#define UNF_FRAG_REASON_VENDOR 0 + +/* GID_PT, GID_FT */ +#define UNF_GID_PT_TYPE 0x7F000000 +#define UNF_GID_FT_TYPE 0x00000008 + +/* + * FC4 defines + */ +#define UNF_FC4_FRAME_PAGE_SIZE 0x10 +#define UNF_FC4_FRAME_PAGE_SIZE_SHIFT 16 + +#define UNF_FC4_FRAME_PARM_0_FCP 0x08000000 +#define UNF_FC4_FRAME_PARM_0_I_PAIR 0x00002000 +#define UNF_FC4_FRAME_PARM_0_GOOD_RSP_CODE 0x00000100 + +#define UNF_FC4_FRAME_PARM_3_INI 0x00000020 +#define UNF_FC4_FRAME_PARM_3_TGT 0x00000010 +#define UNF_FC4_FRAME_PARM_3_R_XFER_DIS 0x00000002 +#define UNF_FC4_FRAME_PARM_3_CONF_ALLOW 0x00000080 /* bit 7 */ +#define UNF_FC4_FRAME_PARM_3_REC_SUPPORT 0x00000400 /* bit 10 */ +#define UNF_FC4_FRAME_PARM_3_TASK_RETRY_ID_SUPPORT 0x00000200 /* bit 9 */ +#define UNF_FC4_FRAME_PARM_3_RETRY_SUPPORT 0x00000100 /* bit 8 */ +#define UNF_FC4_FRAME_PARM_3_CONF_ALLOW 0x00000080 /* bit 7 */ + + +#define UNF_GFF_ACC_MASK 0xFF000000 + +/* Reject CT_IU Reason Codes */ +#define UNF_CTIU_RJT_MASK 0xffff0000 +#define UNF_CTIU_RJT_INVALID_COMMAND 0x00010000 +#define UNF_CTIU_RJT_INVALID_VERSION 0x00020000 +#define UNF_CTIU_RJT_LOGIC_ERR 0x00030000 +#define UNF_CTIU_RJT_INVALID_SIZE 0x00040000 +#define UNF_CTIU_RJT_LOGIC_BUSY 0x00050000 +#define UNF_CTIU_RJT_PROTOCOL_ERR 0x00070000 +#define UNF_CTIU_RJT_UNABLE_PERFORM 0x00090000 +#define UNF_CTIU_RJT_NOT_SUPPORTED 0x000B0000 + +/* FS_RJT Reason code explanations, FC-GS-2 6.5 */ +#define UNF_CTIU_RJT_EXP_MASK 0x0000FF00 +#define UNF_CTIU_RJT_EXP_NO_ADDTION 0x00000000 +#define UNF_CTIU_RJT_EXP_PORTID_NO_REG 0x00000100 +#define UNF_CTIU_RJT_EXP_PORTNAME_NO_REG 0x00000200 +#define UNF_CTIU_RJT_EXP_NODENAME_NO_REG 0x00000300 +#define UNF_CTIU_RJT_EXP_FC4TYPE_NO_REG 0x00000700 +#define UNF_CTIU_RJT_EXP_PORTTYPE_NO_REG 0x00000A00 + +/* + * LS_RJT defines + */ +#define UNF_FC_LS_RJT_REASON_MASK 0x00ff0000 + +/* + * LS_RJT reason code defines + */ +#define UNF_LS_OK 0x00000000 +#define UNF_LS_RJT_INVALID_COMMAND 0x00010000 +#define UNF_LS_RJT_LOGICAL_ERROR 0x00030000 +#define UNF_LS_RJT_BUSY 0x00050000 +#define UNF_LS_RJT_PROTOCOL_ERROR 0x00070000 +#define UNF_LS_RJT_REQUEST_DENIED 0x00090000 +#define UNF_LS_RJT_NOT_SUPPORTED 0x000b0000 +#define UNF_LS_RJT_CLASS_ERROR 0x000c0000 + +/* + * LS_RJT code explanation + */ +#define UNF_LS_RJT_NO_ADDITIONAL_INFO 
0x00000000 +#define UNF_LS_RJT_INV_DATA_FIELD_SIZE 0x00000700 +#define UNF_LS_RJT_INV_COMMON_SERV_PARAM 0x00000F00 +#define UNF_LS_RJT_INVALID_OXID_RXID 0x00001700 +#define UNF_LS_RJT_COMMAND_IN_PROGRESS 0x00001900 +#define UNF_LS_RJT_INSUFFICIENT_RESOURCES 0x00002900 +#define UNF_LS_RJT_COMMAND_NOT_SUPPORTED 0x00002C00 +#define UNF_LS_RJT_UNABLE_TO_SUPLY_REQ_DATA 0x00002A00 +#define UNF_LS_RJT_INVALID_PAYLOAD_LENGTH 0x00002D00 + +#define UNF_P2P_LOCAL_NPORT_ID 0x000000EF +#define UNF_P2P_REMOTE_NPORT_ID 0x000000D6 + +#define UNF_BBCREDIT_MANAGE_NFPORT 0 +#define UNF_BBCREDIT_MANAGE_LPORT 1 +#define UNF_BBCREDIT_LPORT 0 +#define UNF_CONTIN_INCREASE_SUPPORT 1 +#define UNF_CLASS_VALID 1 +#define UNF_CLASS_INVALID 0 +#define UNF_NOT_MEANINGFUL 0 +#define UNF_NO_SERVICE_PARAMS 0 +#define UNF_CLEAN_ADDRESS_DEFAULT 0 +#define UNF_PRIORITY_ENABLE 1 +#define UNF_PRIORITY_DISABLE 0 +#define UNF_SEQUEN_DELIVERY_REQ 1 /* Sequential delivery requested */ + +/* RSCN */ +#define UNF_RSCN_PORT_ADDR 0x0 +#define UNF_RSCN_AREA_ADDR_GROUP 0x1 +#define UNF_RSCN_DOMAIN_ADDR_GROUP 0x2 +#define UNF_RSCN_FABRIC_ADDR_GROUP 0x3 + +#define UNF_GET_RSCN_PLD_LEN(v_cmnd) ((v_cmnd)&0x0000ffff) +#define UNF_RSCN_PAGE_LEN 0x4 + +#define UNF_PORT_LINK_UP 0x0000 +#define UNF_PORT_LINK_DOWN 0x0001 +#define UNF_PORT_RESET_START 0x0002 +#define UNF_PORT_RESET_END 0x0003 +#define UNF_PORT_LINK_UNKNOWN 0x0004 +#define UNF_PORT_NOP 0x0005 +#define UNF_PORT_CORE_FATAL_ERROR 0x0006 +#define UNF_PORT_CORE_UNRECOVERABLE_ERROR 0x0007 +#define UNF_PORT_CORE_RECOVERABLE_ERROR 0x0008 +#define UNF_PORT_UPDATE_PROCESS 0x000b +#define UNF_PORT_DEBUG_DUMP 0x000c +#define UNF_PORT_GET_FWLOG 0x000d +#define UNF_PORT_CLEAN_DONE 0x000e +#define UNF_PORT_BEGIN_REMOVE 0x000f +#define UNF_PORT_RELEASE_RPORT_INDEX 0x0010 +#define UNF_PORT_ABNORMAL_RESET 0x0012 + +#define UNF_READ 0 +#define UNF_WRITE 1 +#define UNF_READ_64 2 +#define UNF_WRITE_64 3 +/* + *SCSI begin + */ +#define SCSIOPC_TEST_UNIT_READY 0x00 +#define SCSIOPC_INQUIRY 0x12 +#define SCSIOPC_MODE_SENSE_6 0x1A +#define SCSIOPC_MODE_SENSE_10 0x5A +#define SCSIOPC_MODE_SELECT_6 0x15 +#define SCSIOPC_RESERVE 0x16 +#define SCSIOPC_RELEASE 0x17 +#define SCSIOPC_START_STOP_UNIT 0x1B +#define SCSIOPC_READ_CAPACITY_10 0x25 +#define SCSIOPC_READ_CAPACITY_16 0x9E +#define SCSIOPC_READ_6 0x08 +#define SCSIOPC_READ_10 0x28 +#define SCSIOPC_READ_12 0xA8 +#define SCSIOPC_READ_16 0x88 +#define SCSIOPC_WRITE_6 0x0A +#define SCSIOPC_WRITE_10 0x2A +#define SCSIOPC_WRITE_12 0xAA +#define SCSIOPC_WRITE_16 0x8A +#define SCSIOPC_WRITE_VERIFY 0x2E +#define SCSIOPC_VERIFY_10 0x2F +#define SCSIOPC_VERIFY_12 0xAF +#define SCSIOPC_VERIFY_16 0x8F +#define SCSIOPC_REQUEST_SENSE 0x03 +#define SCSIOPC_REPORT_LUN 0xA0 +#define SCSIOPC_FORMAT_UNIT 0x04 +#define SCSIOPC_SEND_DIAGNOSTIC 0x1D +#define SCSIOPC_WRITE_SAME_10 0x41 +#define SCSIOPC_WRITE_SAME_16 0x93 +#define SCSIOPC_READ_BUFFER 0x3C +#define SCSIOPC_WRITE_BUFFER 0x3B + +#define SCSIOPC_LOG_SENSE 0x4D +#define SCSIOPC_MODE_SELECT_10 0x55 +#define SCSIOPC_SYNCHRONIZE_CACHE_10 0x35 +#define SCSIOPC_SYNCHRONIZE_CACHE_16 0x91 +#define SCSIOPC_WRITE_AND_VERIFY_10 0x2E +#define SCSIOPC_WRITE_AND_VERIFY_12 0xAE +#define SCSIOPC_WRITE_AND_VERIFY_16 0x8E +#define SCSIOPC_READ_MEDIA_SERIAL_NUMBER 0xAB +#define SCSIOPC_REASSIGN_BLOCKS 0x07 +#define SCSIOPC_ATA_PASSTHROUGH_16 0x85 +#define SCSIOPC_ATA_PASSTHROUGH_12 0xa1 + +/* + * SCSI end + */ +#define IS_READ_COMMAND(opcode) ((opcode) == SCSIOPC_READ_6 || \ + (opcode) == SCSIOPC_READ_10 || \ + (opcode) == 
SCSIOPC_READ_12 || \ + (opcode) == SCSIOPC_READ_16) +#define IS_WRITE_COMMAND(opcode) ((opcode) == SCSIOPC_WRITE_6 || \ + (opcode) == SCSIOPC_WRITE_10 || \ + (opcode) == SCSIOPC_WRITE_12 || \ + (opcode) == SCSIOPC_WRITE_16) + +#define FCP_RSP_LEN_VALID_MASK 0x1 +#define FCP_SNS_LEN_VALID_MASK 0x2 +#define FCP_RESID_OVER_MASK 0x4 +#define FCP_RESID_UNDER_MASK 0x8 +#define FCP_CONF_REQ_MASK 0x10 +#define FCP_SCSI_STATUS_GOOD 0x0 + +#define UNF_DELAYED_WORK_SYNC(v_ret, v_pord_id, v_work, v_work_symb) \ + do { \ + if (!cancel_delayed_work_sync(v_work)) { \ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, \ + UNF_INFO, \ + "[info]LPort or RPort(0x%x) %s worker can't destroy, or no worker", \ + v_pord_id, v_work_symb); \ + v_ret = UNF_RETURN_ERROR; \ + } else { \ + v_ret = RETURN_OK; \ + } \ + } while (0) + +#define UNF_DELAYED_WORK(v_ret, v_pord_id, v_work, v_work_symb) \ + do { \ + if (!cancel_delayed_work(v_work)) { \ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, \ + UNF_MAJOR, \ + "LPort or RPort(0x%x) %s worker can't destroy, or no worker.", \ + v_pord_id, v_work_symb); \ + v_ret = UNF_RETURN_ERROR; \ + } else { \ + v_ret = RETURN_OK; \ + } \ + } while (0) + +#define UNF_DELAYED_WORK_CONFUSED(v_ret, v_pord_id, v_work, v_work_symb) \ + do { \ + if (in_interrupt()) { \ + UNF_DELAYED_WORK(v_ret, v_pord_id, v_work, \ + v_work_symb) \ + } else { \ + UNF_DELAYED_WORK_SYNC(v_ret, v_pord_id, v_work, \ + v_work_symb) \ + } \ + } while (0) + +#define UNF_GET_IO_XCHG_TAG(v_pkg) \ + ((unsigned short)((v_pkg)->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX])) + +#define UNF_GET_SFS_ENTRY(v_pkg) ((union unf_sfs_u *)(void *) \ + (((struct unf_frame_pkg_s *)v_pkg)->unf_cmnd_pload_bl.buffer_ptr)) + +/* FLOGI */ +#define UNF_GET_FLOGI_PAYLOAD(v_pkg) (&(((union unf_sfs_u *) \ + (UNF_GET_SFS_ENTRY(v_pkg)))->flogi.flogi_payload)) +#define UNF_FLOGI_PAYLOAD_LEN sizeof(struct unf_flogi_payload_s) + +/* FLOGI ACC */ +#define UNF_GET_FLOGI_ACC_PAYLOAD(v_pkg) (&(((union unf_sfs_u *) \ + (UNF_GET_SFS_ENTRY(v_pkg)))->flogi_acc.flogi_payload)) +#define UNF_FLOGI_ACC_PAYLOAD_LEN sizeof(struct unf_flogi_payload_s) + +/* FDISC */ +#define UNF_FDISC_PAYLOAD_LEN UNF_FLOGI_PAYLOAD_LEN +#define UNF_FDISC_ACC_PAYLOAD_LEN UNF_FLOGI_ACC_PAYLOAD_LEN + +/* PLOGI */ +#define UNF_GET_PLOGI_PAYLOAD(v_pkg) \ + (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(v_pkg)))->plogi.payload)) +#define UNF_PLOGI_PAYLOAD_LEN sizeof(struct unf_plogi_payload_s) + +/* PLOGI ACC */ +#define UNF_GET_PLOGI_ACC_PAYLOAD(v_pkg) \ + (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(v_pkg)))->plogi_acc.payload)) +#define UNF_PLOGI_ACC_PAYLOAD_LEN sizeof(struct unf_plogi_payload_s) + +/* LOGO */ +#define UNF_LOGO_PAYLOAD_LEN sizeof(struct unf_logo_payload_s) + +/* ECHO */ +#define UNF_GET_ECHO_PAYLOAD(v_pkg) \ + (((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(v_pkg)))->echo.echo_pld) + +/* ECHO PHYADDR */ +#define UNF_GET_ECHO_PAYLOAD_PHYADDR(v_pkg) \ + (((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(v_pkg)))->echo.phy_echo_addr) + +#define UNF_ECHO_PAYLOAD_LEN sizeof(struct unf_echo_payload_s) + +/* RLS */ +#define UNF_RLS_PAYLOAD_LEN sizeof(struct unf_rls_payload_s) + +/* ECHO ACC */ +#define UNF_ECHO_ACC_PAYLOAD_LEN sizeof(struct unf_echo_payload_s) +/* REC */ +#define UNF_REC_PAYLOAD_LEN sizeof(struct unf_rec_pld_s) + +/* REC ACC */ +#define UNF_GET_REC_ACC_PAYLOAD(v_pkg) \ + (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(v_pkg)))->els_acc.cmnd)) + +#define UNF_REC_ACC_PAYLOAD_LEN (sizeof(struct unf_els_acc_s) - \ + sizeof(struct unf_fchead_s)) + +/* RRQ */ +#define 
UNF_RRQ_PAYLOAD_LEN (sizeof(struct unf_rrq_s) - \ + sizeof(struct unf_fchead_s)) + +/* PRLI */ +#define UNF_PRLI_PAYLOAD_LEN sizeof(struct unf_pril_payload_s) + +/* PRLI ACC */ +#define UNF_PRLI_ACC_PAYLOAD_LEN sizeof(struct unf_pril_payload_s) + +/* PRLO */ +#define UNF_PRLO_PAYLOAD_LEN sizeof(struct unf_pril_payload_s) + +#define UNF_PRLO_ACC_PAYLOAD_LEN sizeof(struct unf_pril_payload_s) + +/* PDISC */ +#define UNF_PDISC_PAYLOAD_LEN sizeof(struct unf_plogi_payload_s) + +/* PDISC ACC */ +#define UNF_PDISC_ACC_PAYLOAD_LEN sizeof(struct unf_plogi_payload_s) + +/* ADISC */ +#define UNF_ADISC_PAYLOAD_LEN sizeof(struct unf_adisc_payload_s) + +/* ADISC ACC */ +#define UNF_ADISC_ACC_PAYLOAD_LEN sizeof(struct unf_adisc_payload_s) + +/* RSCN ACC */ +#define UNF_GET_RSCN_ACC_PAYLOAD(v_pkg) \ + (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(v_pkg)))->els_acc.cmnd)) +#define UNF_RSCN_ACC_PAYLOAD_LEN (sizeof(struct unf_els_acc_s) - \ + sizeof(struct unf_fchead_s)) + +/* LOGO ACC */ +#define UNF_LOGO_ACC_PAYLOAD_LEN (sizeof(struct unf_els_acc_s) - \ + sizeof(struct unf_fchead_s)) + +/* RRQ ACC */ +#define UNF_RRQ_ACC_PAYLOAD_LEN (sizeof(struct unf_els_acc_s) - \ + sizeof(struct unf_fchead_s)) + +/* RLS ACC */ +#define UNF_RLS_ACC_PAYLOAD_LEN (sizeof(struct unf_rls_acc_s) - \ + sizeof(struct unf_fchead_s)) + +/* GPN_ID */ +#define UNF_GPNID_PAYLOAD_LEN (sizeof(struct unf_gpnid_s) - \ + sizeof(struct unf_fchead_s)) + +#define UNF_GPNID_RSP_PAYLOAD_LEN (sizeof(struct unf_gpnid_rsp_s) - \ + sizeof(struct unf_fchead_s)) + +/* GNN_ID */ +#define UNF_GNNID_PAYLOAD_LEN (sizeof(struct unf_gnnid_s) - \ + sizeof(struct unf_fchead_s)) + +#define UNF_GNNID_RSP_PAYLOAD_LEN (sizeof(struct unf_gnnid_rsp_s) - \ + sizeof(struct unf_fchead_s)) + +/* GFF_ID */ +#define UNF_GFFID_PAYLOAD_LEN (sizeof(struct unf_gffid_s) - \ + sizeof(struct unf_fchead_s)) + +#define UNF_GFFID_RSP_PAYLOAD_LEN (sizeof(struct unf_gffid_rsp_s) - \ + sizeof(struct unf_fchead_s)) + +/* GID_FT/GID_PT */ +#define UNF_GET_GID_PAYLOAD(v_pkg) (&(((union unf_sfs_u *) \ + UNF_GET_SFS_ENTRY(v_pkg))->get_id.gid_req.ctiu_pream)) + +#define UNF_GID_PAYLOAD_LEN (sizeof(struct unf_ctiu_prem_s) + \ + sizeof(unsigned int)) + +#define UNF_GID_ACC_PAYLOAD_LEN sizeof(struct unf_gif_acc_pld_s) + +/* RFT_ID */ +#define UNF_RFTID_PAYLOAD_LEN (sizeof(struct unf_rftid_s) - \ + sizeof(struct unf_fchead_s)) + +#define UNF_RFTID_RSP_PAYLOAD_LEN sizeof(struct unf_ctiu_prem_s) + +/* RFF_ID */ +#define UNF_RFFID_PAYLOAD_LEN (sizeof(struct unf_rffid_s) - \ + sizeof(struct unf_fchead_s)) + +#define UNF_RFFID_RSP_PAYLOAD_LEN sizeof(struct unf_ctiu_prem_s) + +/* SRR */ +#define UNF_SRR_PAYLOAD_LEN \ + sizeof(struct unf_srr_payload_s) + +/* ACC&RJT */ +#define UNF_ELS_ACC_RJT_LEN (sizeof(struct unf_els_rjt_s) - \ + sizeof(struct unf_fchead_s)) + +/* SCR */ +#define UNF_SCR_PAYLOAD_LEN (sizeof(struct unf_scr_s) - \ + sizeof(struct unf_fchead_s)) + +#define UNF_SCR_RSP_PAYLOAD_LEN (sizeof(struct unf_els_acc_s) - \ + sizeof(struct unf_fchead_s)) + +/**********************************************************/ +#define UNF_GET_XCHG_TAG(v_pkg) (((struct unf_frame_pkg_s *) \ + v_pkg)->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]) + +#define UNF_GET_SID(v_pkg) (((struct unf_frame_pkg_s *) \ + v_pkg)->frame_head.csctl_sid & UNF_NPORTID_MASK) +#define UNF_GET_DID(v_pkg) (((struct unf_frame_pkg_s *) \ + v_pkg)->frame_head.rctl_did & UNF_NPORTID_MASK) +#define UNF_GET_OXID(v_pkg) (((struct unf_frame_pkg_s *) \ + v_pkg)->frame_head.oxid_rxid >> 16) +#define UNF_GET_RXID(v_pkg) ((unsigned short)((struct 
unf_frame_pkg_s *) \ + v_pkg)->frame_head.oxid_rxid) +#define UNF_GET_XFER_LEN(v_pkg) (((struct unf_frame_pkg_s *)v_pkg)->transfer_len) + +/* ioc abort */ +#define UNF_GETXCHGALLOCTIME(v_pkg) \ + (((struct unf_frame_pkg_s *)v_pkg)->private[PKG_PRIVATE_XCHG_ALLOC_TIME]) +#define UNF_SET_XCHG_ALLOC_TIME(pkg, xchg) \ + (((struct unf_frame_pkg_s *)(pkg))->private[PKG_PRIVATE_XCHG_ALLOC_TIME] = \ + (((struct unf_xchg_s *)(xchg))->private[PKG_PRIVATE_XCHG_ALLOC_TIME])) +#define UNF_SET_ABORT_INFO_IOTYPE(pkg, xchg) \ + (((struct unf_frame_pkg_s *)(pkg))->private[PKG_PRIVATE_XCHG_ABORT_INFO] |= \ + (((unsigned char)(((struct unf_xchg_s *)(xchg))->data_direction & 0x7))\ + << 2)) + +#define UNF_CHECK_NPORT_FPORT_BIT(els_payload) \ + (((struct unf_flogi_payload_s *)els_payload)->fabric_parms.co_parms.n_port) + +#define UNF_N_PORT 0 +#define UNF_F_PORT 1 + +#define UNF_GET_RA_TOV_FROM_PARAMS(params) \ + (((struct unf_fabric_parms_s *)params)->co_parms.r_a_tov) +#define UNF_GET_RT_TOV_FROM_PARAMS(params) \ + (((struct unf_fabric_parms_s *)params)->co_parms.r_t_tov) +#define UNF_GET_E_D_TOV_FROM_PARAMS(params) \ + (((struct unf_fabric_parms_s *)params)->co_parms.e_d_tov) +#define UNF_GET_E_D_TOV_RESOLUTION_FROM_PARAMS(params) \ + (((struct unf_fabric_parms_s *)params)->co_parms.e_d_tov_resolution) +#define UNF_GET_BB_SC_N_FROM_PARAMS(params) \ + (((struct unf_fabric_parms_s *)params)->co_parms.bb_scn) +#define UNF_GET_BB_CREDIT_FROM_PARAMS(params) \ + (((struct unf_fabric_parms_s *)params)->co_parms.bb_credit) +enum unf_pcie_error_code_e { + UNF_PCIE_ERROR_NONE = 0, + UNF_PCIE_DATAPARITYDETECTED = 1, + UNF_PCIE_SIGNALTARGETABORT, + UNF_PCIE_RECEIVEDTARGETABORT, + UNF_PCIE_RECEIVEDMASTERABORT, + UNF_PCIE_SIGNALEDSYSTEMERROR, + UNF_PCIE_DETECTEDPARITYERROR, + UNF_PCIE_CORRECTABLEERRORDETECTED, + UNF_PCIE_NONFATALERRORDETECTED, + UNF_PCIE_FATALERRORDETECTED, + UNF_PCIE_UNSUPPORTEDREQUESTDETECTED, + UNF_PCIE_AUXILIARYPOWERDETECTED, + UNF_PCIE_TRANSACTIONSPENDING, + + UNF_PCIE_UNCORRECTINTERERRSTATUS, + UNF_PCIE_UNSUPPORTREQERRSTATUS, + UNF_PCIE_ECRCERRORSTATUS, + UNF_PCIE_MALFORMEDTLPSTATUS, + UNF_PCIE_RECEIVEROVERFLOWSTATUS, + UNF_PCIE_UNEXPECTCOMPLETESTATUS, + UNF_PCIE_COMPLETERABORTSTATUS, + UNF_PCIE_COMPLETIONTIMEOUTSTATUS, + UNF_PCIE_FLOWCTRLPROTOCOLERRSTATUS, + UNF_PCIE_POISONEDTLPSTATUS, + UNF_PCIE_SURPRISEDOWNERRORSTATUS, + UNF_PCIE_DATALINKPROTOCOLERRSTATUS, + UNF_PCIE_ADVISORYNONFATALERRSTATUS, + UNF_PCIE_REPLAYTIMERTIMEOUTSTATUS, + UNF_PCIE_REPLAYNUMROLLOVERSTATUS, + UNF_PCIE_BADDLLPSTATUS, + UNF_PCIE_BADTLPSTATUS, + UNF_PCIE_RECEIVERERRORSTATUS, + + UNF_PCIE_BUTT +}; + +#define UNF_DMA_HI32(a) (((a) >> 32) & 0xffffffff) +#define UNF_DMA_LO32(a) ((a) & 0xffffffff) + +#define UNF_WWN_LEN 8 +#define UNF_MAC_LEN 6 + +/* send BLS/ELS/BLS REPLY/ELS REPLY/GS/ */ +/* rcvd BLS/ELS/REQ DONE/REPLY DONE */ +#define UNF_PKG_BLS_REQ 0x0100 +#define UNF_PKG_BLS_REQ_DONE 0x0101 + +#define UNF_PKG_ELS_REQ 0x0200 + +#define UNF_PKG_ELS_REQ_DONE 0x0201 + +#define UNF_PKG_ELS_REPLY 0x0202 + +#define UNF_PKG_ELS_REPLY_DONE 0x0203 + +#define UNF_PKG_GS_REQ 0x0300 + +#define UNF_PKG_GS_REQ_DONE 0x0301 + +#define UNF_PKG_INI_IO 0x0500 +#define UNF_PKG_INI_RCV_TGT_RSP 0x0507 + +/* external sgl struct start */ +struct unf_esgl_page_s { + unsigned long long page_address; + dma_addr_t esgl_phyaddr; + unsigned int page_size; +}; + +struct unf_esgl_s { + struct list_head entry_esgl; + struct unf_esgl_page_s page; +}; + +/* external sgl struct end */ +struct unf_frame_payld_s { + unsigned char *buffer_ptr; + dma_addr_t 
buf_dma_addr; + unsigned int length; +}; + +enum pkg_private_index_e { + PKG_PRIVATE_LOWLEVEL_XCHG_ADD = 0, + PKG_PRIVATE_XCHG_HOT_POOL_INDEX = 1, /* Hot Pool Index */ + PKG_PRIVATE_XCHG_RPORT_INDEX = 2, /* RPort index */ + PKG_PRIVATE_XCHG_VP_INDEX = 3, /* VPort index */ + PKG_PRIVATE_RPORT_RX_SIZE, + PKG_PRIVATE_XCHG_TIMEER, + PKG_PRIVATE_XCHG_ALLOC_TIME, + PKG_PRIVATE_XCHG_ABORT_INFO, + PKG_PRIVATE_ECHO_CMD_SND_TIME, /* local send echo cmd time stamp */ + PKG_PRIVATE_ECHO_ACC_RCV_TIME, /* local receive echo acc time stamp */ + PKG_PRIVATE_ECHO_CMD_RCV_TIME, /* remote receive echo cmd time stamp */ + PKG_PRIVATE_ECHO_RSP_SND_TIME, /* remote send echo rsp time stamp */ + PKG_MAX_PRIVATE_DATA_SIZE +}; + +extern unsigned int dix_flag; +extern unsigned int dif_sgl_mode; +extern unsigned int dif_app_esc_check; +extern unsigned int dif_ref_esc_check; + +#define UNF_DIF_ACTION_NONE 0 + +enum unf_adm_dif_mode_e { + UNF_SWITCH_DIF_DIX = 0, + UNF_APP_REF_ESCAPE, + ALL_DIF_MODE = 20, +}; + +#define UNF_VERIFY_CRC_MASK (1 << 1) +#define UNF_VERIFY_APP_MASK (1 << 2) +#define UNF_VERIFY_LBA_MASK (1 << 3) + +#define UNF_REPLACE_CRC_MASK (1 << 8) +#define UNF_REPLACE_APP_MASK (1 << 9) +#define UNF_REPLACE_LBA_MASK (1 << 10) + +#define UNF_DIF_ACTION_MASK (0xff << 16) +#define UNF_DIF_ACTION_INSERT (0x1 << 16) +#define UNF_DIF_ACTION_VERIFY_AND_DELETE (0x2 << 16) +#define UNF_DIF_ACTION_VERIFY_AND_FORWARD (0x3 << 16) +#define UNF_DIF_ACTION_VERIFY_AND_REPLACE (0x4 << 16) + +#define UNF_DIF_ACTION_NO_INCREASE_REFTAG (0x1 << 24) + +#define UNF_DEFAULT_CRC_GUARD_SEED (0) +#define UNF_CAL_BLOCK_CNT(data_len, sector_size) ((data_len) / (sector_size)) + +#define UNF_DIF_DOUBLE_SGL (1 << 1) +#define UNF_DIF_SECTSIZE_4KB (1 << 2) +#define UNF_DIF_LBA_NONE_INCREASE (1 << 3) +#define UNF_DIF_TYPE3 (1 << 4) + +#define HIFC_DIF_APP_REF_ESC_NOT_CHECK 1 +#define HIFC_DIF_APP_REF_ESC_CHECK 0 + +enum unf_io_state_e { + UNF_INI_IO = 0, + UNF_TGT_XFER = 1, + UNF_TGT_RSP = 2 +}; + +#define UNF_PKG_LAST_RESPONSE 0 +#define UNF_PKG_NOT_LAST_RESPONSE 1 + +#define UNF_PKG_LAST_REQUEST 1 +#define UNF_PKG_NOT_LAST_REQUEST 0 + +struct unf_frame_pkg_s { + /* pkt type:BLS/ELS/FC4LS/CMND/XFER/RSP */ + unsigned int type; + unsigned int last_pkg_flag; + +#define UNF_FCP_RESPONSE_VALID 0x01 +#define UNF_FCP_SENSE_VALID 0x02 + /* resp and sense vailed flag */ + unsigned int response_and_sense_valid_flag; + unsigned int cmnd; + struct unf_fchead_s frame_head; + unsigned int entry_count; + void *xchg_contex; + unsigned int transfer_len; + unsigned int residus_len; + unsigned int status; + unsigned int status_sub_code; + enum unf_io_state_e io_state; + unsigned int qos_level; + + unsigned int private[PKG_MAX_PRIVATE_DATA_SIZE]; + + unsigned char byte_orders; + + struct unf_fcp_cmnd_s *fcp_cmnd; + struct unf_dif_control_info_s dif_control; + struct unf_frame_payld_s unf_cmnd_pload_bl; + struct unf_frame_payld_s unf_rsp_pload_bl; + struct unf_frame_payld_s unf_sense_pload_bl; + void *upper_cmd; + unsigned int abts_maker_status; + +}; + +#define UNF_MAX_SFS_XCHG 2048 +#define UNF_RESERVE_SFS_XCHG 128 /* times on exchange mgr num */ + +struct unf_lport_cfg_item_s { + unsigned int port_id; + unsigned int port_mode; /* INI(0x20) TGT(0x10) BOTH(0x30) */ + unsigned int port_topology; /* 0x3:loop , 0xc:p2p ,0xf:auto */ + unsigned int max_queue_depth; + unsigned int max_io; /* Recommended Value 512-4096 */ + unsigned int max_login; + unsigned int max_sfs_xchg; + /* 0:auto 1:1Gbps 2:2Gbps 4:4Gbps 8:8Gbps 16:16Gbps */ + unsigned int port_speed; + 
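+ /* Assumed meanings, inferred from field names and common FC usage:
+ * tape_support/fcp_conf are per-port feature switches, and bb_scn is the
+ * buffer-to-buffer state change number (BB_SC_N) negotiated at login.
+ */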
unsigned int tape_support; /* tape support */ + unsigned int fcp_conf; /* fcp confirm support */ + unsigned int bb_scn; + unsigned int sum_resource; + enum int_e res_mgmt_enabled; +}; + +struct unf_port_dynamic_info_s { + unsigned int sfp_posion; + unsigned int sfp_valid; + unsigned int phy_link; + unsigned int firmware_state; + unsigned int cur_speed; + unsigned int mailbox_timeout_cnt; +}; + +struct unf_hinicam_pkg { + unsigned int msg_format; + void *buff_in; + void *buff_out; + unsigned int in_size; + unsigned int *out_size; +}; + +struct unf_version_str_s { + char *buf; + unsigned int buf_len; +}; + +struct unf_buf_s { + unsigned char *cbuf; + unsigned int buf_len; +}; + +struct unf_rw_reg_param_s { + unsigned int rw_type; + unsigned int offset; + unsigned long long value; +}; + +/* get ucode & up ver */ +#define HIFC_VER_LEN (16) +#define HIFC_COMPILE_TIME_LEN (20) +struct unf_fw_version_s { + unsigned int message_type; + unsigned char fw_version[HIFC_VER_LEN]; +}; + +enum unf_port_config_set_op_e { + UNF_PORT_CFG_SET_SPEED, + UNF_PORT_CFG_SET_TOPO, + UNF_PORT_CFG_SET_BBSCN, + UNF_PORT_CFG_SET_MODE, + UNF_PORT_CFG_SET_SFP_SWITCH, + UNF_PORT_CFG_SET_PORT_SWITCH, + UNF_PORT_CFG_SET_POWER_STATE, + UNF_PORT_CFG_SET_PORT_STATE, + UNF_PORT_CFG_SET_INTR_COALSEC, + UNF_PORT_CFG_UPDATE_PORT, + UNF_PORT_CFG_UPDATE_WWN, + UNF_PORT_CFG_TEST_FLASH, + UNF_PORT_CFG_SET_FCP_CONF, + UNF_PORT_CFG_SET_LOOP_ROLE, + UNF_PORT_CFG_SET_INIT_REQ, + UNF_PORT_CFG_SET_MAX_SUPPORT_SPEED, + UNF_PORT_CFG_SET_MAC_ADDR, + UNF_PORT_CFG_SET_SFP_USEDTIME, + UNF_PORT_CFG_SET_PORT_TRANSFER_PARAMETER, + UNF_PORT_CFG_SET_SFP_REG_WRITE, + UNF_PORT_CFG_UPDATE_SFP, + UNF_PORT_CFG_UPDATE_FABRIC_PARAM, + UNF_PORT_CFG_UPDATE_PLOGI_PARAM, + UNF_PORT_CFG_UPDATE_FDISC_PARAM, + UNF_PORT_CFG_SAVE_HBA_INFO, + UNF_PORT_CFG_SET_HBA_BASE_INFO, + UNF_PORT_CFG_SET_FLASH_DATA_INFO, + UNF_PORT_CFG_SET_BUTT +}; + +enum unf_port_config_get_op_e { + UNF_PORT_CFG_GET_SPEED_CFG, + UNF_PORT_CFG_GET_SPEED_ACT, + UNF_PORT_CFG_GET_TOPO_CFG, + UNF_PORT_CFG_GET_TOPO_ACT, + UNF_PORT_CFG_GET_MODE, + UNF_PORT_CFG_GET_LOOP_MAP, + UNF_PORT_CFG_GET_TOV, + UNF_PORT_CFG_GET_SFP_PRESENT, + UNF_PORT_CFG_GET_SFP_INFO, + UNF_PORT_CFG_GET_FW_VER, + UNF_PORT_CFG_GET_HW_VER, + UNF_PORT_CFG_GET_LESB_THEN_CLR, /* Link Error Status Block, LESB */ + UNF_PORT_CFG_GET_DYNAMIC_INFO, + UNF_PORT_CFG_GET_VITAL_REGS, + UNF_PORT_CFG_CLR_LESB, + UNF_PORT_CFG_GET_WORKBALE_BBCREDIT, + UNF_PORT_CFG_GET_WORKBALE_BBSCN, + UNF_PORT_CFG_GET_FC_SERDES, + UNF_PORT_CFG_GET_LOOP_ALPA, + UNF_PORT_CFG_GET_SFP_DYNAMIC_INFO, + UNF_PORT_CFG_GET_MAC_ADDR, + UNF_PORT_CFG_GET_SFP_USEDTIME, + UNF_PORT_CFG_GET_PORT_INFO, + UNF_PORT_CFG_DDT_TEST, + UNF_PORT_CFG_GET_LED_STATE, + UNF_PORT_CFG_GET_VLAN, + UNF_PORT_CFG_GET_SFP_REG_READ, + UNF_PORT_CFG_GET_SFP_VER, + UNF_PORT_CFG_GET_SFP_SUPPORT_UPDATE, + UNF_PORT_CFG_GET_SFP_LOG, + UNF_PORT_CFG_GET_FEC, + UNF_PORT_CFG_GET_PCIE_LINK_STATE, + UNF_PORT_CFG_GET_FLASH_DATA_INFO, + UNF_PORT_CFG_GET_BUTT +}; + +enum unf_port_diag_op_e { + UNF_PORT_DIAG_PORT_DETAIL, + UNF_PORT_DIAG_RD_WR_REG, + UNF_PORT_DIAG_BUTT +}; + +enum unf_port_config_state_e { + UNF_PORT_CONFIG_STATE_START, + UNF_PORT_CONFIG_STATE_STOP, + UNF_PORT_CONFIG_STATE_RESET, + UNF_PORT_CONFIG_STATE_STOP_INTR, + UNF_PORT_CONFIG_STATE_BUTT +}; + +struct unf_port_login_parms_s { + enum unf_act_topo_e en_act_topo; + + unsigned int rport_index; + unsigned int seq_cnt : 1; + unsigned int ed_tov : 1; + unsigned int reserved : 14; + unsigned int tx_mfs : 16; + unsigned int ed_tov_timer_val; + + unsigned 
char remote_rttov_tag; + unsigned char remote_edtov_tag; + unsigned short remote_bbcredit; + unsigned short compared_bbscn; + unsigned int compared_edtov_val; + unsigned int compared_ratov_val; + unsigned int els_cmnd_code; +}; + +#define HIFC_FLASH_MAX_LEN 1024 // bytes + +struct unf_mbox_head_info_s { + /* mbox header */ + unsigned char cmnd_type; + unsigned char length; + unsigned char port_id; + unsigned char pad0; + + /* operation */ + unsigned int op_code : 4; + unsigned int pad1 : 28; +}; + +#define HIFC_FLASH_MBOX_HEAD_MAX_LEN 8 // bytes +struct unf_mbox_head_sts_s { + /* mbox header */ + unsigned char cmnd_type; + unsigned char length; + unsigned char port_id; + unsigned char pad0; + + /* operation */ + unsigned short pad1; + unsigned char pad2; + unsigned char status; +}; + +#define HIFC_FLASH_UEFI_MAX_LEN 16 // bytes +struct unf_flash_uefi_switch_s { + unsigned char writeflag; + unsigned char sanbooten; + unsigned char reserved[14]; +}; + +#define HIFC_MGMT_UEFI_MAGIC_NUM 0xAF +#define HIFC_MGMT_TMO_MAGIC_NUM 0xAE + +#define HIFC_FLASH_LINK_TMO_MAX_LEN 16 // bytes +struct unf_flash_link_tmo_s { + unsigned char writeflag; + unsigned char link_tmo0; + unsigned char link_tmo1; + unsigned char link_tmo2; + unsigned char link_tmo3; + unsigned char reserved[11]; +}; + +#define HIFC_FLASH_DATA_MAX_LEN (HIFC_FLASH_MAX_LEN - \ + HIFC_FLASH_MBOX_HEAD_MAX_LEN) // bytes +struct unf_flash_data_s { + struct unf_flash_uefi_switch_s uefi_switch; // 16 bytes + struct unf_flash_link_tmo_s link_tmo; // 16 bytes + /* once the related struct change, the reserved size needs modify */ + unsigned char reserved[HIFC_FLASH_DATA_MAX_LEN - 32]; +}; + +/* size of hifc_flash_data_mgmt not more than 1024 bytes */ +struct unf_mbox_flash_data_mgmt_s { + struct unf_mbox_head_info_s mbox_head; // 8 bytes + struct unf_flash_data_s flash_data; +}; + +struct unf_flash_data_mgmt_sts_s { + struct unf_mbox_head_sts_s mbox_head; // 8 bytes + struct unf_flash_data_s flash_data; +}; + +struct unf_low_level_service_op_s { + unsigned int (*pfn_unf_els_send)(void *, struct unf_frame_pkg_s *); + unsigned int (*pfn_unf_bls_send)(void *, struct unf_frame_pkg_s *); + unsigned int (*pfn_unf_gs_send)(void *, struct unf_frame_pkg_s *); + unsigned int (*pfn_unf_fc_4_ls_send)(void *, struct unf_frame_pkg_s *); + unsigned int (*pfn_unf_cmnd_send)(void *, struct unf_frame_pkg_s *); + unsigned int (*pfn_ll_relese_xchg_res)(void *, + struct unf_frame_pkg_s *); + unsigned int (*pfn_unf_release_rport_res)(void *, struct + unf_rport_info_s *); + unsigned int (*pfn_unf_get_consumed_res)(void *, + struct unf_frame_pkg_s *); + unsigned int (*pfn_unf_flush_ini_resp_que)(void *); + unsigned int (*pfn_unf_alloc_rport_res)(void *, + struct unf_rport_info_s *); + unsigned int (*pfn_unf_rport_session_rst)(void *, + struct unf_rport_info_s *); +}; + +struct unf_low_level_port_mgr_op_s { + /* fcport/opcode/input parameter */ + unsigned int (*pfn_ll_port_config_set) + (void *v_fc_port, + enum unf_port_config_set_op_e v_op_code, + void *v_para_in); + /* fcport/opcode/output parameter */ + unsigned int (*pfn_ll_port_config_get) + (void *v_fc_port, + enum unf_port_config_get_op_e v_op_code, + void *v_para_out); + /* fcport/opcode/input parameter/output parameter */ + unsigned int (*pfn_ll_port_diagnose) + (void *v_fc_port, + enum unf_port_diag_op_e v_op_code, + void *v_para); + +}; + +struct unf_chip_info_s { + unsigned char chip_type; + unsigned char chip_work_mode; + unsigned char disable_err_flag; +}; + +struct unf_low_level_function_op_s { + struct 
unf_chip_info_s chip_info; + /* low level type */ + unsigned int low_level_type; + /* low level name, fc etc. */ + const char *name; + struct pci_dev *dev; + unsigned long long sys_node_name; + unsigned long long sys_port_name; + + struct unf_lport_cfg_item_s lport_cfg_items; + + /* low level Xchg mgr type, + * active --alloc oxid and rxid + * passtive -- not alloc oxid and rxid + */ +#define UNF_LOW_LEVEL_MGR_TYPE_ACTIVE 0 +#define UNF_LOW_LEVEL_MGR_TYPE_PASSTIVE 1 + const unsigned int xchg_mgr_type; + +#define UNF_NO_EXTRA_ABTS_XCHG 0x0 +#define UNF_LL_IOC_ABTS_XCHG 0x1 + const unsigned int abts_xchg; +#define UNF_CM_RPORT_SET_QUALIFIER 0x0 +#define UNF_CM_RPORT_SET_QUALIFIER_REUSE 0x1 +#define UNF_CM_RPORT_SET_QUALIFIER_HIFC 0x2 + /* low level pass-through flag. */ +#define UNF_LOW_LEVEL_PASS_THROUGH_FIP 0x0 +#define UNF_LOW_LEVEL_PASS_THROUGH_FABRIC_LOGIN 0x1 +#define UNF_LOW_LEVEL_PASS_THROUGH_PORT_LOGIN 0x2 + unsigned int pass_through_flag; + /* low level parameter */ + unsigned int support_max_npiv_num; + unsigned int support_max_speed; + unsigned int fc_ser_max_speed; + unsigned int support_max_rport; + unsigned int support_max_xid_range; + unsigned int sfp_type; + unsigned int update_fw_reset_active; + unsigned int support_upgrade_report; + unsigned int multi_conf_support; + unsigned int port_type; +#define UNF_LOW_LEVEL_RELEASE_RPORT_SYNC 0x0 +#define UNF_LOW_LEVEL_RELEASE_RPORT_ASYNC 0x1 + unsigned char rport_release_type; +#define UNF_LOW_LEVEL_SIRT_PAGE_MODE_FIXED 0x0 +#define UNF_LOW_LEVEL_SIRT_PAGE_MODE_XCHG 0x1 + unsigned char sirt_page_mode; + unsigned char sfp_speed; + /* IO reference */ + struct unf_low_level_service_op_s service_op; + /* Port Mgr reference */ + struct unf_low_level_port_mgr_op_s port_mgr_op; + unsigned char chip_id; +}; + +struct unf_cm_handle_op_s { + /* return:L_Port */ + void *(*pfn_unf_alloc_local_port)(void *, + struct unf_low_level_function_op_s *); + /* input para:L_Port */ + unsigned int (*pfn_unf_release_local_port)(void *); + /* input para:lport vn2vnid,output para:ok/err */ + unsigned int (*pfn_unf_set_vn2vn_id)(void *, unsigned int); + unsigned char (*pfn_unf_get_loop_id)(unsigned int v_port_id); + /* input para:L_Port, FRAME_PKG_S */ + unsigned int (*pfn_unf_receive_els_pkg)(void *v_lport, + struct unf_frame_pkg_s *v_pkg); + /* input para:L_Port, FRAME_PKG_S */ + unsigned int (*pfn_unf_receive_gs_pkg)(void *v_lport, + struct unf_frame_pkg_s *v_pkg); + /* input para:L_Port, FRAME_PKG_S */ + unsigned int (*pfn_unf_receive_bls_pkg)(void *v_lport, + struct unf_frame_pkg_s *v_pkg); + /* input para:L_Port, FRAME_PKG_S */ + unsigned int (*pfn_unf_receive_fc4_ls_pkg)( + void *v_lport, + struct unf_frame_pkg_s *v_pkg); + /* input para:L_Port, FRAME_PKG_S */ + unsigned int (*pfn_unf_send_els_done)(void *v_lport, + struct unf_frame_pkg_s *v_pkg); + unsigned int (*pfn_unf_send_fc4_ls_done)(void *v_lport, + struct unf_frame_pkg_s *v_pkg); + /* input para:L_Port, FRAME_PKG_S */ + unsigned int (*pfn_unf_receive_marker_status)( + void *v_lport, struct unf_frame_pkg_s *v_pkg); + unsigned int (*pfn_unf_receive_abts_marker_status)( + void *v_lport, struct unf_frame_pkg_s *v_pkg); + /* input para:L_Port, FRAME_PKG_S */ + unsigned int (*pfn_unf_receive_ini_rsponse)( + void *v_lport, struct unf_frame_pkg_s *v_pkg); + int (*pfn_unf_get_cfg_parms)(char *v_section_name, + struct unf_cfg_item_s *v_cfg_parm, + unsigned int *v_cfg_value, + unsigned int v_item_num); + unsigned int (*pfn_unf_cm_get_sgl_entry)(void *v_pkg, + char **v_buf, + unsigned int *v_buf_len); + 
unsigned int (*pfn_unf_cm_get_dif_sgl_entry)(void *v_pkg, + char **v_buf, + unsigned int *v_buf_len); + struct unf_esgl_page_s *(*pfn_unf_get_one_free_esgl_page)( + void *v_lport, struct unf_frame_pkg_s *v_pkg); + /* input para:L_Port, EVENT */ + unsigned int (*pfn_unf_fc_port_link_event)(void *v_lport, + unsigned int v_events, + void *v_input); + unsigned int (*pfn_unf_fcoe_update_fcf_name)(void *v_lport, + void *v_input); + int (*pfn_unf_ioctl_to_com_handler)(void *v_lport, + struct unf_hinicam_pkg *v_pkg); +}; + +unsigned int unf_get_cm_handle_op(struct unf_cm_handle_op_s *v_cm_handle); +int unf_common_init(void); +void unf_common_exit(void); + +struct unf_port_info_entry_s { + unsigned int bb_scn; + unsigned int speed; + unsigned int topo; + unsigned int fec; +}; + +enum drv_cable_connector_type_e { + DRV_CABLE_CONNECTOR_NONE, + DRV_CABLE_CONNECTOR_OPTICAL, + DRV_CABLE_CONNECTOR_COPPER, + DRV_CABLE_CONNECTOR_INVALID, + DRV_CABLE_CONNECTOR_BUTT +}; + +#endif diff --git a/drivers/scsi/huawei/hifc/unf_init.c b/drivers/scsi/huawei/hifc/unf_init.c new file mode 100644 index 000000000000..c902a7f71bf5 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_init.c @@ -0,0 +1,564 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Fabric Channel Linux driver + * Copyright(c) 2018 Huawei Technologies Co., Ltd + * + */ + +#include "unf_log.h" +#include "unf_event.h" +#include "unf_exchg.h" +#include "unf_portman.h" +#include "unf_rport.h" +#include "unf_service.h" +#include "unf_io.h" + +#define RPORT_FEATURE_POOL_SIZE 4096 + +static struct unf_esgl_page_s *unf_cm_get_one_free_esgl_page( + void *v_lport, + struct unf_frame_pkg_s *v_fra_pkg); +static unsigned int unf_recv_tmf_marker_status( + void *v_lport, + struct unf_frame_pkg_s *v_fra_pkg); +static unsigned int unf_recv_abts_mrker_status( + void *v_lport, + struct unf_frame_pkg_s *v_fra_pkg); +static int unf_get_cfg_parms(char *v_section_name, + struct unf_cfg_item_s *v_cfg_parm, + unsigned int *v_cfg_value, + unsigned int v_item_num); + + +/* global variables */ +unsigned int event_thread_exit; +struct task_struct *event_thread; + +struct completion *fc_event_handle_thd_comp; +struct workqueue_struct *unf_work_queue; + +struct unf_global_card_thread_s card_thread_mgr; +unsigned int unf_dbg_level = UNF_MAJOR; +unsigned int log_print_level = UNF_INFO; +unsigned int log_limted_times = UNF_LOGIN_ATT_PRINT_TIMES; + +struct unf_cm_handle_op_s cm_low_levle_handle = { + .pfn_unf_alloc_local_port = unf_lport_create_and_init, + .pfn_unf_release_local_port = unf_release_local_port, + .pfn_unf_receive_els_pkg = unf_receive_els_pkg, + .pfn_unf_receive_gs_pkg = unf_receive_gs_pkg, + .pfn_unf_receive_bls_pkg = unf_receive_bls_pkg, + .pfn_unf_send_els_done = unf_send_els_done, + .pfn_unf_receive_ini_rsponse = unf_ini_scsi_completed, + .pfn_unf_get_cfg_parms = unf_get_cfg_parms, + .pfn_unf_receive_marker_status = unf_recv_tmf_marker_status, + .pfn_unf_receive_abts_marker_status = unf_recv_abts_mrker_status, + + .pfn_unf_cm_get_sgl_entry = unf_ini_get_sgl_entry, + .pfn_unf_cm_get_dif_sgl_entry = unf_ini_get_dif_sgl_entry, + .pfn_unf_get_one_free_esgl_page = unf_cm_get_one_free_esgl_page, + .pfn_unf_fc_port_link_event = unf_fc_port_link_event, + .pfn_unf_ioctl_to_com_handler = unf_cmd_adm_handler, +}; + +static struct unf_esgl_page_s *unf_cm_get_one_free_esgl_page( + void *v_lport, + struct unf_frame_pkg_s *v_fra_pkg) +{ + struct unf_lport_s *lport = NULL; + struct unf_xchg_s *xchg = NULL; + + UNF_CHECK_VALID(0x1700, 1, v_lport, return NULL); + UNF_CHECK_VALID(0x1701, 
1, v_fra_pkg, return NULL); + + lport = (struct unf_lport_s *)v_lport; + xchg = (struct unf_xchg_s *)v_fra_pkg->xchg_contex; + + return unf_get_one_free_esgl_page(lport, xchg); /* from esgl pool */ +} + +static int unf_get_cfg_parms(char *v_section_name, + struct unf_cfg_item_s *v_cfg_parm, + unsigned int *v_cfg_value, + unsigned int v_item_num) +{ + /* Maximum length of a configuration item value, + * including the end character + */ +#define UNF_MAX_ITEM_VALUE_LEN (256) + + unsigned int *value = NULL; + struct unf_cfg_item_s *cfg_parm = NULL; + unsigned int i = 0; + + cfg_parm = v_cfg_parm; + value = v_cfg_value; + + for (i = 0; i < v_item_num; i++) { + if (!cfg_parm || !value) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_REG_ATT, UNF_ERR, + "[err]Config name or value is NULL"); + + return UNF_RETURN_ERROR; + } + + if (strcmp("End", cfg_parm->name) == 0) + break; + + if (strcmp("fw_path", cfg_parm->name) == 0) { + cfg_parm++; + value += UNF_MAX_ITEM_VALUE_LEN / sizeof(unsigned int); + + continue; + } + + *value = cfg_parm->default_value; + cfg_parm++; + value++; + } + + return RETURN_OK; +} + +static unsigned int unf_recv_tmf_marker_status( + void *v_lport, + struct unf_frame_pkg_s *v_fra_pkg) +{ + struct unf_lport_s *lport = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned short hot_pool_tag = 0; + + UNF_CHECK_VALID(0x3543, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3544, UNF_TRUE, v_fra_pkg, return UNF_RETURN_ERROR); + lport = (struct unf_lport_s *)v_lport; + + /* Find exchange which point to marker sts */ + if (!lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) tag function is NULL", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + hot_pool_tag = (unsigned short) + (v_fra_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]); + + xchg = (struct unf_xchg_s *) + (lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag((void *)lport, + hot_pool_tag)); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) find exchange by tag(0x%x) failed", + lport->port_id, lport->nport_id, hot_pool_tag); + + return UNF_RETURN_ERROR; + } + + /* + * NOTE: set exchange TMF state with MARKER_STS_RECEIVED + * + * About TMF state + * 1. STS received + * 2. Response received + * 3. 
Do check if necessary + */ + xchg->tmf_state |= MARKER_STS_RECEIVED; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]Marker STS: D_ID(0x%x) S_ID(0x%x) OX_ID(0x%x) RX_ID(0x%x), EXCH: D_ID(0x%x) S_ID(0x%x) OX_ID(0x%x) RX_ID(0x%x)", + v_fra_pkg->frame_head.rctl_did & UNF_NPORTID_MASK, + v_fra_pkg->frame_head.csctl_sid & UNF_NPORTID_MASK, + (unsigned short)(v_fra_pkg->frame_head.oxid_rxid >> 16), + (unsigned short)(v_fra_pkg->frame_head.oxid_rxid), + xchg->did, + xchg->sid, + xchg->ox_id, + xchg->rx_id); + + return RETURN_OK; +} + +static unsigned int unf_recv_abts_mrker_status( + void *v_lport, + struct unf_frame_pkg_s *v_fra_pkg) +{ + struct unf_lport_s *lport = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned short hot_pool_tag = 0; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x3543, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3544, UNF_TRUE, v_fra_pkg, return UNF_RETURN_ERROR); + lport = (struct unf_lport_s *)v_lport; + + /* Find exchange by tag */ + if (!lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) tag function is NULL", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + hot_pool_tag = (unsigned short) + (v_fra_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]); + + xchg = (struct unf_xchg_s *) + (lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag((void *)lport, + hot_pool_tag)); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) find exchange by tag(0x%x) failed", + lport->port_id, lport->nport_id, hot_pool_tag); + + return UNF_RETURN_ERROR; + } + + /* + * NOTE: set exchange ABTS state with MARKER_STS_RECEIVED + * + * About exchange ABTS state + * 1. STS received + * 2. Response received + * 3. Do check if necessary + * + * About Exchange status get from low level + * 1. Set: when RCVD ABTS Marker + * 2. Set: when RCVD ABTS Req Done + * 3. 
value: set value with pkg->status + */ + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + xchg->ucode_abts_state = v_fra_pkg->status; + xchg->abts_state |= MARKER_STS_RECEIVED; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[info]Port(0x%x) wake up SEMA for Abts marker exchange(0x%p) oxid(0x%x 0x%x) status(0x%x)", + lport->port_id, xchg, xchg->ox_id, xchg->hot_pool_tag, + v_fra_pkg->abts_maker_status); + + /* + * NOTE: Second time for ABTS marker received, or + * ABTS response have been received, no need to wake up sema + */ + if ((xchg->io_state & INI_IO_STATE_ABORT_TIMEOUT) || + (xchg->abts_state & ABTS_RESPONSE_RECEIVED)) { + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_KEVENT, + "[info]Port(0x%x) no need to wake up SEMA for Abts marker ABTS_STATE(0x%x) IO_STATE(0x%x)", + lport->port_id, xchg->abts_state, + xchg->io_state); + + return RETURN_OK; + } + if (xchg->io_state & INI_IO_STATE_TMF_ABORT) { + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_KEVENT, + "[info]Port(0x%x) receive Abts marker, exchange(%p) state(0x%x) free it", + lport->port_id, xchg, xchg->io_state); + + unf_cm_free_xchg(lport, xchg); + } else { + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + up(&xchg->task_sema); + } + + return RETURN_OK; +} + +unsigned int unf_get_cm_handle_op(struct unf_cm_handle_op_s *v_cm_handle) +{ + UNF_CHECK_VALID(0x1708, UNF_TRUE, v_cm_handle, + return UNF_RETURN_ERROR); + + memcpy(v_cm_handle, &cm_low_levle_handle, + sizeof(struct unf_cm_handle_op_s)); + + return RETURN_OK; +} + +static void unf_uninit_cm_low_level_handle(void) +{ + memset(&cm_low_levle_handle, 0, sizeof(struct unf_cm_handle_op_s)); +} + +int unf_event_process(void *v_arg) +{ + struct list_head *node = NULL; + struct unf_cm_event_report *event_node = NULL; + unsigned long flags = 0; + + UNF_REFERNCE_VAR(v_arg); + + set_user_nice(current, 4); + recalc_sigpending(); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[event]Enter event thread"); + + complete(fc_event_handle_thd_comp); + + do { + spin_lock_irqsave(&fc_event_list.fc_eventlist_lock, flags); + if (list_empty(&fc_event_list.list_head) == UNF_TRUE) { + spin_unlock_irqrestore(&fc_event_list.fc_eventlist_lock, + flags); + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout((long)msecs_to_jiffies(1000)); + } else { + node = (&fc_event_list.list_head)->next; + list_del_init(node); + fc_event_list.list_num--; + event_node = list_entry(node, + struct unf_cm_event_report, + list_entry); + spin_unlock_irqrestore(&fc_event_list.fc_eventlist_lock, + flags); + + /* Process event node */ + unf_handle_event(event_node); + } + } while (!event_thread_exit); + + complete_and_exit(fc_event_handle_thd_comp, 0); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EVENT, UNF_MAJOR, + "[event]Event thread exit"); + + return RETURN_OK; +} + +static unsigned int unf_creat_event_center(void) +{ + struct completion fc_event_completion = + COMPLETION_INITIALIZER(fc_event_completion); + + struct completion *p_fc_event_completion = &fc_event_completion; + + INIT_LIST_HEAD(&fc_event_list.list_head); + fc_event_list.list_num = 0; + spin_lock_init(&fc_event_list.fc_eventlist_lock); + fc_event_handle_thd_comp = p_fc_event_completion; + + event_thread = kthread_run(unf_event_process, NULL, "hifc_event"); + if (IS_ERR(event_thread)) { + complete_and_exit(fc_event_handle_thd_comp, 0); + 
fc_event_handle_thd_comp = NULL; + + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Create event thread failed(0x%p)", + event_thread); + + return UNF_RETURN_ERROR; + } + wait_for_completion(fc_event_handle_thd_comp); + return RETURN_OK; +} + +static void unf_cm_event_thread_exit(void) +{ + struct completion fc_event_completion = + COMPLETION_INITIALIZER(fc_event_completion); + + struct completion *p_fc_event_completion = &fc_event_completion; + + fc_event_handle_thd_comp = p_fc_event_completion; + event_thread_exit = 1; + wake_up_process(event_thread); + wait_for_completion(fc_event_handle_thd_comp); + + fc_event_handle_thd_comp = NULL; +} + +static void unf_cm_cread_card_mgr_list(void) +{ + /* So far, do not care */ + INIT_LIST_HEAD(&card_thread_mgr.list_card_list_head); + + spin_lock_init(&card_thread_mgr.global_card_list_lock); + + card_thread_mgr.card_sum = 0; +} + +static int unf_port_feature_pool_init(void) +{ + unsigned int i = 0; + unsigned int rport_fea_pool_size = 0; + struct unf_rport_feature_recard_s *rport_fea_recard = NULL; + unsigned long flags = 0; + + rport_fea_pool_size = sizeof(struct unf_rport_feature_pool_s); + port_fea_pool = vmalloc(rport_fea_pool_size); + if (!port_fea_pool) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]cannot allocate rport feature pool"); + + return UNF_RETURN_ERROR; + } + memset(port_fea_pool, 0, rport_fea_pool_size); + spin_lock_init(&port_fea_pool->port_fea_pool_lock); + INIT_LIST_HEAD(&port_fea_pool->list_busy_head); + INIT_LIST_HEAD(&port_fea_pool->list_free_head); + + port_fea_pool->p_port_feature_pool_addr = + vmalloc((size_t)(RPORT_FEATURE_POOL_SIZE * + sizeof(struct unf_rport_feature_recard_s))); + if (!port_fea_pool->p_port_feature_pool_addr) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]cannot allocate rport feature pool address"); + + vfree(port_fea_pool); + port_fea_pool = NULL; + + return UNF_RETURN_ERROR; + } + + memset(port_fea_pool->p_port_feature_pool_addr, 0, + sizeof(struct unf_rport_feature_recard_s) * + RPORT_FEATURE_POOL_SIZE); + rport_fea_recard = + (struct unf_rport_feature_recard_s *) + port_fea_pool->p_port_feature_pool_addr; + + spin_lock_irqsave(&port_fea_pool->port_fea_pool_lock, flags); + for (i = 0; i < RPORT_FEATURE_POOL_SIZE; i++) { + list_add_tail(&rport_fea_recard->entry_feature, + &port_fea_pool->list_free_head); + rport_fea_recard++; + } + spin_unlock_irqrestore(&port_fea_pool->port_fea_pool_lock, flags); + + return RETURN_OK; +} + +void unf_free_port_feature_pool(void) +{ + if (port_fea_pool->p_port_feature_pool_addr) { + vfree(port_fea_pool->p_port_feature_pool_addr); + port_fea_pool->p_port_feature_pool_addr = NULL; + } + vfree(port_fea_pool); + port_fea_pool = NULL; +} + +int unf_common_init(void) +{ + int ret = RETURN_OK; + + unf_dbg_level = UNF_MAJOR; + log_print_level = UNF_KEVENT; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "UNF Driver Version:%s.", UNF_FC_VERSION); + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "UNF Compile Time: %s", __TIME_STR__); + + ret = unf_port_feature_pool_init(); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port Feature Pool init failed"); + + return ret; + } + + /* 1. Init Transport */ + ret = (int)unf_register_ini_transport(); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]INI interface init failed"); + unf_free_port_feature_pool(); + + return ret; + } + + /* 2. 
Init L_Port MG: Y */ + unf_port_mgmt_init(); + + /* 3. Init card MG list: N */ + unf_cm_cread_card_mgr_list(); + + /* 4. Init global event resource: N */ + ret = (int)unf_init_global_event_msg(); + if (ret != RETURN_OK) { + unf_unregister_ini_transport(); + unf_free_port_feature_pool(); + + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Create global event center failed"); + + return ret; + } + + /* 5. Create event center(one thread per pf): Y */ + ret = (int)unf_creat_event_center(); + if (ret != RETURN_OK) { + unf_destroy_global_event_msg(); + unf_unregister_ini_transport(); + unf_free_port_feature_pool(); + + fc_event_handle_thd_comp = NULL; + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Create event center (thread) failed"); + + return ret; + } + + /* 6. Create work queue: Y */ + unf_work_queue = create_workqueue("unf_wq"); + if (!unf_work_queue) { + /* event thread exist */ + unf_cm_event_thread_exit(); + unf_destroy_global_event_msg(); + + fc_event_handle_thd_comp = NULL; + unf_unregister_ini_transport(); + unf_free_port_feature_pool(); + + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Create work queue failed"); + + return UNF_RETURN_ERROR; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Init common layer succeed"); + + return ret; +} + +static void unf_destroy_dirty_port(void) +{ + unsigned int v_ditry_port_num = 0; + + unf_show_dirty_port(UNF_FALSE, &v_ditry_port_num); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Sys has %d dirty L_Port(s)", v_ditry_port_num); +} + +void unf_common_exit(void) +{ + unf_free_port_feature_pool(); + + unf_destroy_dirty_port(); + + flush_workqueue(unf_work_queue); + destroy_workqueue(unf_work_queue); + unf_work_queue = NULL; + + unf_cm_event_thread_exit(); + + unf_destroy_global_event_msg(); + + unf_uninit_cm_low_level_handle(); + + unf_port_mgmt_deinit(); + + unf_unregister_ini_transport(); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "[info]HIFC module remove succeed"); +} diff --git a/drivers/scsi/huawei/hifc/unf_log.h b/drivers/scsi/huawei/hifc/unf_log.h new file mode 100644 index 000000000000..a46a77a42ded --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_log.h @@ -0,0 +1,183 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __UNF_LOG_H__ +#define __UNF_LOG_H__ + +#define UNF_CRITICAL 1 +#define UNF_ERR 2 +#define UNF_WARN 3 +#define UNF_KEVENT 4 +#define UNF_MAJOR 5 +#define UNF_MINOR 6 +#define UNF_INFO 7 +#define UNF_DATA 7 +#define UNF_ALL 7 + +enum unf_debug_type_e { + UNF_DEBUG_TYPE_MML = 0, + UNF_DEBUG_TYPE_DIAGNOSE = 1, + UNF_DEBUG_TYPE_MESSAGE = 2, + UNF_DEBUG_TYPE_BUTT +}; + +enum unf_log_attr_e { + UNF_LOG_LOGIN_ATT = 0x1, + UNF_LOG_IO_ATT = 0x2, + UNF_LOG_EQUIP_ATT = 0x4, + UNF_LOG_REG_ATT = 0x8, + UNF_LOG_REG_MML_TEST = 0x10, + UNF_LOG_EVENT = 0x20, + UNF_LOG_NORMAL = 0x40, + UNF_LOG_ABNORMAL = 0X80, + UNF_LOG_BUTT +}; + +enum event_log_e { + UNF_EVTLOG_DRIVER_SUC = 0, + UNF_EVTLOG_DRIVER_INFO, + UNF_EVTLOG_DRIVER_WARN, + UNF_EVTLOG_DRIVER_ERR, + UNF_EVTLOG_LINK_SUC, + UNF_EVTLOG_LINK_INFO, + UNF_EVTLOG_LINK_WARN, + UNF_EVTLOG_LINK_ERR, + UNF_EVTLOG_IO_SUC, + UNF_EVTLOG_IO_INFO, + UNF_EVTLOG_IO_WARN, + UNF_EVTLOG_IO_ERR, + UNF_EVTLOG_TOOL_SUC, + UNF_EVTLOG_TOOL_INFO, + UNF_EVTLOG_TOOL_WARN, + UNF_EVTLOG_TOOL_ERR, + UNF_EVTLOG_BUT +}; + +#define UNF_IO_ATT_PRINT_TIMES 2 +#define 
UNF_LOGIN_ATT_PRINT_TIMES 100 + +#define UNF_IO_ATT_PRINT_LIMIT msecs_to_jiffies(2 * 1000) + +extern unsigned int unf_dbg_level; +extern unsigned int log_print_level; +extern unsigned int log_limted_times; + +#define DRV_LOG_LIMIT(module_id, log_level, log_id, log_att, format, ...) \ + do { \ + static unsigned long pre; \ + static int should_print = UNF_LOGIN_ATT_PRINT_TIMES; \ + if (time_after_eq(jiffies, pre + (UNF_IO_ATT_PRINT_LIMIT))) { \ + if (log_att == UNF_LOG_ABNORMAL) { \ + should_print = UNF_IO_ATT_PRINT_TIMES; \ + } else { \ + should_print = log_limted_times; \ + } \ + } \ + if (should_print < 0) { \ + if (log_att != UNF_LOG_ABNORMAL) { \ + pre = jiffies; \ + } \ + break; \ + } \ + if (should_print-- > 0) { \ + printk(log_level \ + "[%d][FC_UNF]" format "[%s][%-5d]\n", \ + smp_processor_id(), ##__VA_ARGS__, \ + __func__, __LINE__); \ + } \ + if (should_print == 0) { \ + printk(log_level \ + "[FC_UNF]log is limited[%s][%-5d]\n", \ + __func__, __LINE__); \ + } \ + pre = jiffies; \ + } while (0) + +#define UNF_CHECK_VALID(logid, need_check, condition, fail_do) \ + do { \ + if (unlikely(!(condition))) { \ + UNF_TRACE((logid), UNF_LOG_REG_ATT, UNF_ERR, \ + "Para check(%s) invalid", #condition); \ + fail_do; \ + } \ + } while (0) + +#define HIUNF_TRACE(log_id, log_att, log_level, format, ...) \ + do { \ + if (unlikely((log_level) <= log_print_level)) { \ + if (log_level == UNF_CRITICAL) { \ + DRV_LOG_LIMIT(UNF_PID, KERN_CRIT, log_id, \ + log_att, format, ##__VA_ARGS__); \ + } else if (log_level == UNF_WARN) { \ + DRV_LOG_LIMIT(UNF_PID, KERN_WARNING, log_id, \ + log_att, format, ##__VA_ARGS__); \ + } else if (log_level == UNF_ERR) { \ + DRV_LOG_LIMIT(UNF_PID, KERN_ERR, log_id, \ + log_att, format, ##__VA_ARGS__); \ + } else if (log_level == UNF_MAJOR || \ + log_level == UNF_MINOR || \ + log_level == UNF_KEVENT) { \ + DRV_LOG_LIMIT(UNF_PID, KERN_NOTICE, log_id, \ + log_att, format, ##__VA_ARGS__); \ + } else if (log_level == UNF_INFO || \ + log_level == UNF_DATA) { \ + DRV_LOG_LIMIT(UNF_PID, KERN_INFO, log_id, \ + log_att, format, ##__VA_ARGS__); \ + } \ + } \ + } while (0) + +#define UNF_TRACE(log_id, log_att, log_level, fmt, ...) 
\ + do { \ + HIUNF_TRACE(log_id, log_att, log_level, fmt, ##__VA_ARGS__); \ + } while (0) + +#define UNF_INIT_PRIVATE_ST(private_st) \ + do { \ + memset(&(private_st), 0, sizeof(private_st)); \ + } while (0) + +#define UNF_PRINT_SFS(dbg_level, portid, v_data, v_size) \ + do { \ + if ((dbg_level) <= log_print_level) { \ + unsigned int cnt = 0; \ + printk(KERN_INFO "[INFO]Port(0x%x) sfs:0x", \ + (portid)); \ + for (cnt = 0; cnt < (v_size) / 4; cnt++) { \ + printk(KERN_INFO "%08x ", \ + ((unsigned int *)v_data)[cnt]); \ + } \ + printk(KERN_INFO "[FC_UNF][%s]\n", __FUNCTION__); \ + } \ + } while (0) + +#define UNF_PRINT_SFS_LIMIT(dbg_level, portid, v_data, v_size) \ + do { \ + if ((dbg_level) <= log_print_level) { \ + static unsigned long pre; \ + static int should_print = UNF_LOGIN_ATT_PRINT_TIMES; \ + if (time_after_eq(jiffies, pre + \ + UNF_IO_ATT_PRINT_LIMIT)) { \ + should_print = log_limted_times; \ + } \ + if (should_print < 0) { \ + pre = jiffies; \ + break; \ + } \ + if (should_print-- > 0) { \ + UNF_PRINT_SFS(dbg_level, portid, \ + v_data, v_size); \ + } \ + if (should_print == 0) { \ + printk(KERN_INFO "[FC_UNF]sfs log is limited[%s][%-5d]\n", \ + __func__, __LINE__); \ + } \ + pre = jiffies; \ + } \ + } while (0) + +#define UNF_REFERNCE_VAR(var) + +#endif diff --git a/drivers/scsi/huawei/hifc/unf_scsi.c b/drivers/scsi/huawei/hifc/unf_scsi.c new file mode 100644 index 000000000000..11331bacb914 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_scsi.c @@ -0,0 +1,1556 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#include "unf_log.h" +#include "unf_scsi_common.h" +#include "unf_lport.h" +#include "unf_rport.h" +#include "unf_portman.h" +#include "unf_npiv.h" +#include "unf_exchg.h" +#include "unf_io.h" + +static int unf_scsi_queue_cmd(struct Scsi_Host *shost, + struct scsi_cmnd *v_cmnd); +static int unf_scsi_abort_scsi_cmnd(struct scsi_cmnd *v_cmnd); +static int unf_scsi_device_reset_handler(struct scsi_cmnd *v_cmnd); +static int unf_scsi_bus_reset_handler(struct scsi_cmnd *v_cmnd); +static int unf_scsi_target_reset_handler(struct scsi_cmnd *v_cmnd); +static int unf_scsi_slave_alloc(struct scsi_device *sdev); +static void unf_scsi_destroy_slave(struct scsi_device *sdev); +static int unf_scsi_slave_configure(struct scsi_device *sdev); +static int unf_scsi_scan_finished(struct Scsi_Host *shost, unsigned long time); +static void unf_scsi_scan_start(struct Scsi_Host *shost); + +static struct scsi_transport_template *scsi_transport_template; +static struct scsi_transport_template *scsi_transport_template_v; + +struct unf_ini_error_code_s ini_error_code_table1[] = { + { UNF_IO_SUCCESS, UNF_SCSI_HOST(DID_OK) }, + { UNF_IO_ABORTED, UNF_SCSI_HOST(DID_ABORT) }, + { UNF_IO_FAILED, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_ABORT_ABTS, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_ABORT_LOGIN, UNF_SCSI_HOST(DID_NO_CONNECT) }, + { UNF_IO_ABORT_REET, UNF_SCSI_HOST(DID_RESET) }, + { UNF_IO_ABORT_FAILED, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_OUTOF_ORDER, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_FTO, UNF_SCSI_HOST(DID_TIME_OUT) }, + { UNF_IO_LINK_FAILURE, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_OVER_FLOW, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_RSP_OVER, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_LOST_FRAME, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_UNDER_FLOW, UNF_SCSI_HOST(DID_OK) }, + { UNF_IO_HOST_PROG_ERROR, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_SEST_PROG_ERROR, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_INVALID_ENTRY, 
UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_ABORT_SEQ_NOT, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_REJECT, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_EDC_IN_ERROR, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_EDC_OUT_ERROR, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_UNINIT_KEK_ERR, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_DEK_OUTOF_RANGE, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_KEY_UNWRAP_ERR, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_KEY_TAG_ERR, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_KEY_ECC_ERR, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_BLOCK_SIZE_ERROR, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_ILLEGAL_CIPHER_MODE, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_CLEAN_UP, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_ABORTED_BY_TARGET, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_TRANSPORT_ERROR, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_LINK_FLASH, UNF_SCSI_HOST(DID_NO_CONNECT) }, + { UNF_IO_TIMEOUT, UNF_SCSI_HOST(DID_TIME_OUT) }, + { UNF_IO_DMA_ERROR, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_NO_LPORT, UNF_SCSI_HOST(DID_NO_CONNECT) }, + { UNF_IO_NO_XCHG, UNF_SCSI_HOST(DID_SOFT_ERROR) }, + { UNF_IO_SOFT_ERR, UNF_SCSI_HOST(DID_SOFT_ERROR) }, + { UNF_IO_PORT_LOGOUT, UNF_SCSI_HOST(DID_NO_CONNECT) }, + { UNF_IO_ERREND, UNF_SCSI_HOST(DID_ERROR) }, + { UNF_IO_DIF_ERROR, (UNF_SCSI_HOST(DID_OK) | UNF_SCSI_STATUS(SCSI_CHECK_CONDITION)) }, + { UNF_IO_INCOMPLETE, UNF_SCSI_HOST(DID_IMM_RETRY) }, + { UNF_IO_DIF_REF_ERROR, (UNF_SCSI_HOST(DID_OK) | UNF_SCSI_STATUS(SCSI_CHECK_CONDITION)) }, + { UNF_IO_DIF_GEN_ERROR, (UNF_SCSI_HOST(DID_OK) | UNF_SCSI_STATUS(SCSI_CHECK_CONDITION)) } +}; + +unsigned int ini_err_code_table_cnt1 = + sizeof(ini_error_code_table1) / sizeof(struct unf_ini_error_code_s); + +static void unf_set_rport_loss_tmo(struct fc_rport *rport, + unsigned int timeout) +{ + if (timeout) + rport->dev_loss_tmo = timeout; + else + rport->dev_loss_tmo = 1; +} + +static void unf_get_host_port_id(struct Scsi_Host *shost) +{ + struct unf_lport_s *lport = NULL; + + lport = (struct unf_lport_s *)shost->hostdata[0]; + if (unlikely(!lport)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port is null"); + + return; + } + + fc_host_port_id(shost) = lport->port_id; +} + +static void unf_get_host_speed(struct Scsi_Host *shost) +{ + struct unf_lport_s *lport = NULL; + unsigned int speed = FC_PORTSPEED_UNKNOWN; + + lport = (struct unf_lport_s *)shost->hostdata[0]; + if (unlikely(!lport)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port is null"); + + return; + } + + switch (lport->speed) { + case UNF_PORT_SPEED_2_G: + speed = FC_PORTSPEED_2GBIT; + break; + + case UNF_PORT_SPEED_4_G: + speed = FC_PORTSPEED_4GBIT; + break; + + case UNF_PORT_SPEED_8_G: + speed = FC_PORTSPEED_8GBIT; + break; + + case UNF_PORT_SPEED_16_G: + speed = FC_PORTSPEED_16GBIT; + break; + + case UNF_PORT_SPEED_32_G: + speed = FC_PORTSPEED_32GBIT; + break; + + default: + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) with unknown speed(0x%x) for FC mode", + lport->port_id, lport->speed); + break; + } + + fc_host_speed(shost) = speed; +} + +static void unf_get_host_port_type(struct Scsi_Host *shost) +{ + struct unf_lport_s *lport = NULL; + unsigned int port_type = FC_PORTTYPE_UNKNOWN; + + lport = (struct unf_lport_s *)shost->hostdata[0]; + if (unlikely(!lport)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port is null"); + + return; + } + + switch (lport->en_act_topo) { + case UNF_ACT_TOP_PRIVATE_LOOP: + port_type = FC_PORTTYPE_LPORT; + break; + + case UNF_ACT_TOP_PUBLIC_LOOP: + port_type = FC_PORTTYPE_NLPORT; + break; + + case 
UNF_ACT_TOP_P2P_DIRECT: + port_type = FC_PORTTYPE_PTP; + break; + + case UNF_ACT_TOP_P2P_FABRIC: + port_type = FC_PORTTYPE_NPORT; + break; + + default: + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) with unknown topo type(0x%x) for FC mode", + lport->port_id, lport->en_act_topo); + break; + } + + fc_host_port_type(shost) = port_type; +} + +static void unf_get_symbolic_name(struct Scsi_Host *shost) +{ + unsigned char *name = NULL; + struct unf_lport_s *lport = NULL; + + lport = (struct unf_lport_s *)shost->hostdata[0]; + if (unlikely(!lport)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Check l_port failed"); + + return; + } + + name = fc_host_symbolic_name(shost); + if (name) { + snprintf(name, FC_SYMBOLIC_NAME_SIZE, + "HIFC_FW_RELEASE:%s HIFC_DRV_RELEASE:%s", + lport->fw_version, UNF_FC_VERSION); + } +} + +static void unf_get_host_fabric_name(struct Scsi_Host *shost) +{ + struct unf_lport_s *lport = NULL; + + lport = (struct unf_lport_s *)shost->hostdata[0]; + if (unlikely(!lport)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port is null"); + + return; + } + + fc_host_fabric_name(shost) = lport->fabric_node_name; +} + +static void unf_get_host_port_state(struct Scsi_Host *shost) +{ + struct unf_lport_s *lport = NULL; + enum fc_port_state port_state; + + lport = (struct unf_lport_s *)shost->hostdata[0]; + if (unlikely(!lport)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port is null"); + + return; + } + + switch (lport->link_up) { + case UNF_PORT_LINK_DOWN: + port_state = FC_PORTSTATE_OFFLINE; + break; + + case UNF_PORT_LINK_UP: + port_state = FC_PORTSTATE_ONLINE; + break; + + default: + port_state = FC_PORTSTATE_UNKNOWN; + break; + } + + fc_host_port_state(shost) = port_state; +} + +static void unf_dev_loss_timeout_callbk(struct fc_rport *rport) +{ + /* + * NOTE: about rport->dd_data + * --->>> local SCSI_ID + * 1. Assignment during scsi rport link up + * 2. Released when scsi rport link down & timeout(30s) + * 3. 
Used during scsi do callback with slave_alloc function + */ + struct Scsi_Host *host = NULL; + struct unf_lport_s *lport = NULL; + unsigned int scsi_id = 0; + + if (unlikely(!rport)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]SCSI rport is null"); + + return; + } + + host = rport_to_shost(rport); + if (unlikely(!host)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Host is null"); + + return; + } + + /* according to Local SCSI_ID */ + scsi_id = *(unsigned int *)(rport->dd_data); + if (unlikely(scsi_id >= UNF_MAX_SCSI_ID)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]scsi_id(0x%x) is max than(0x%x)", + scsi_id, UNF_MAX_SCSI_ID); + + return; + } + + lport = (struct unf_lport_s *)host->hostdata[0]; + if (unf_is_lport_valid(lport) == RETURN_OK) { + UNF_TRACE(0x3097, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[event]Port(0x%x_0x%x) RPort scsi_id(0x%x) target_id(0x%x) loss timeout", + lport->port_id, lport->nport_id, + scsi_id, rport->scsi_target_id); + + atomic_inc(&lport->session_loss_tmo); + + /* Free SCSI ID & set table state with DEAD */ + (void)unf_free_scsi_id(lport, scsi_id); + } else { + UNF_TRACE(0x3097, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(%p) is invalid", lport); + } + + /* reset scsi rport dd_data(local SCSI_ID) */ + *((unsigned int *)rport->dd_data) = INVALID_VALUE32; +} + +int unf_scsi_create_vport(struct fc_vport *fc_port, bool disabled) +{ + struct unf_lport_s *vport = NULL; + struct unf_lport_s *lport = NULL; + struct Scsi_Host *shost = NULL; + struct vport_config_s vport_config = { 0 }; + + shost = vport_to_shost(fc_port); + + lport = (struct unf_lport_s *)shost->hostdata[0]; + if (unf_is_lport_valid(lport) != RETURN_OK) { + UNF_TRACE(0x3097, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(%p) is invalid", lport); + + return RETURN_ERROR; + } + + vport_config.port_name = fc_port->port_name; + + vport_config.port_mode = fc_port->roles; + + vport = unf_create_vport(lport, &vport_config); + if (!vport) { + UNF_TRACE(0x3097, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) Create Vport failed on lldrive", + lport->port_id); + + return RETURN_ERROR; + } + + fc_port->dd_data = vport; + + vport->vport = fc_port; + + return RETURN_OK; +} + +int unf_scsi_delete_vport(struct fc_vport *fc_port) +{ + int ret = RETURN_ERROR; + struct unf_lport_s *vport = NULL; + + vport = (struct unf_lport_s *)fc_port->dd_data; + + if (unf_is_lport_valid(vport) != RETURN_OK) { + UNF_TRACE(0x3097, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]VPort(%p) is invalid or is removing", + vport); + + fc_port->dd_data = NULL; + + return ret; + } + + ret = (int)unf_destroy_one_vport(vport); + if (ret != RETURN_OK) { + UNF_TRACE(0x3097, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]VPort(0x%x) destroy failed on drive", + vport->port_id); + + return ret; + } + + fc_port->dd_data = NULL; + + return ret; +} + +struct fc_function_template function_template = { + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_supported_speeds = 1, + + .get_host_port_id = unf_get_host_port_id, + .show_host_port_id = 1, + .get_host_speed = unf_get_host_speed, + .show_host_speed = 1, + .get_host_port_type = unf_get_host_port_type, + .show_host_port_type = 1, + .get_host_symbolic_name = unf_get_symbolic_name, + .show_host_symbolic_name = 1, + .set_host_system_hostname = NULL, + .show_host_system_hostname = 1, + .get_host_fabric_name = unf_get_host_fabric_name, + .show_host_fabric_name = 1, + .get_host_port_state = unf_get_host_port_state, + .show_host_port_state = 1, + + 
.dd_fcrport_size = sizeof(void *), + .show_rport_supported_classes = 1, + + .get_starget_node_name = NULL, + .show_starget_node_name = 1, + .get_starget_port_name = NULL, + .show_starget_port_name = 1, + .get_starget_port_id = NULL, + .show_starget_port_id = 1, + + .set_rport_dev_loss_tmo = unf_set_rport_loss_tmo, + .show_rport_dev_loss_tmo = 0, + + .issue_fc_host_lip = NULL, + .dev_loss_tmo_callbk = unf_dev_loss_timeout_callbk, + .terminate_rport_io = NULL, + .get_fc_host_stats = NULL, + + .vport_create = unf_scsi_create_vport, + .vport_disable = NULL, + .vport_delete = unf_scsi_delete_vport, + .bsg_request = NULL, + .bsg_timeout = NULL, +}; + +struct fc_function_template function_template_v = { + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_supported_speeds = 1, + + .get_host_port_id = unf_get_host_port_id, + .show_host_port_id = 1, + .get_host_speed = unf_get_host_speed, + .show_host_speed = 1, + .get_host_port_type = unf_get_host_port_type, + .show_host_port_type = 1, + .get_host_symbolic_name = unf_get_symbolic_name, + .show_host_symbolic_name = 1, + .set_host_system_hostname = NULL, + .show_host_system_hostname = 1, + .get_host_fabric_name = unf_get_host_fabric_name, + .show_host_fabric_name = 1, + .get_host_port_state = unf_get_host_port_state, + .show_host_port_state = 1, + + .dd_fcrport_size = sizeof(void *), + .show_rport_supported_classes = 1, + + .get_starget_node_name = NULL, + .show_starget_node_name = 1, + .get_starget_port_name = NULL, + .show_starget_port_name = 1, + .get_starget_port_id = NULL, + .show_starget_port_id = 1, + + .set_rport_dev_loss_tmo = unf_set_rport_loss_tmo, + .show_rport_dev_loss_tmo = 1, + + .issue_fc_host_lip = NULL, + .dev_loss_tmo_callbk = unf_dev_loss_timeout_callbk, + .terminate_rport_io = NULL, + .get_fc_host_stats = NULL, + + .vport_create = NULL, + .vport_disable = NULL, + .vport_delete = NULL, + .bsg_request = NULL, + .bsg_timeout = NULL, +}; + +struct scsi_host_template scsi_host_template = { + .module = THIS_MODULE, + .name = "HIFC", + + .queuecommand = unf_scsi_queue_cmd, + .eh_abort_handler = unf_scsi_abort_scsi_cmnd, + .eh_device_reset_handler = unf_scsi_device_reset_handler, + + .eh_target_reset_handler = unf_scsi_target_reset_handler, + .eh_bus_reset_handler = unf_scsi_bus_reset_handler, + .eh_host_reset_handler = NULL, + + .slave_configure = unf_scsi_slave_configure, + .slave_alloc = unf_scsi_slave_alloc, + .slave_destroy = unf_scsi_destroy_slave, + + .scan_finished = unf_scsi_scan_finished, + .scan_start = unf_scsi_scan_start, + + .this_id = -1, + .cmd_per_lun = 3, + .use_clustering = ENABLE_CLUSTERING, + .shost_attrs = NULL, + .sg_tablesize = SG_ALL, + .max_sectors = 0xFFFF, + .supported_mode = MODE_INITIATOR, +}; + +static void unf_unmap_prot_sgl(struct scsi_cmnd *v_cmnd) +{ + struct device *dev; + + if ((scsi_get_prot_op(v_cmnd) != SCSI_PROT_NORMAL) && + hifc_dif_enable && (scsi_prot_sg_count(v_cmnd))) { + dev = v_cmnd->device->host->dma_dev; + dma_unmap_sg(dev, scsi_prot_sglist(v_cmnd), + (int)scsi_prot_sg_count(v_cmnd), + v_cmnd->sc_data_direction); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "scsi done cmd:%p op:%d,difsglcount:%d", + v_cmnd, scsi_get_prot_op(v_cmnd), + scsi_prot_sg_count(v_cmnd)); + } +} + +void unf_scsi_done(struct unf_scsi_cmd_s *v_scsi_cmnd) +{ + struct scsi_cmnd *cmnd = NULL; + + UNF_CHECK_VALID(0x509, UNF_TRUE, v_scsi_cmnd, return); + cmnd = (struct scsi_cmnd *)v_scsi_cmnd->upper_cmnd; + UNF_CHECK_VALID(0x510, UNF_TRUE, cmnd, 
return); + UNF_CHECK_VALID(0x511, UNF_TRUE, cmnd->scsi_done, return); + + scsi_set_resid(cmnd, (int)v_scsi_cmnd->resid); + + cmnd->result = v_scsi_cmnd->result; + scsi_dma_unmap(cmnd); + unf_unmap_prot_sgl(cmnd); + return cmnd->scsi_done(cmnd); +} + +void unf_host_init_attr_setting(unf_scsi_host_s *scsi_host) +{ + struct unf_lport_s *lport = NULL; + unsigned int speed = FC_PORTSPEED_UNKNOWN; + + lport = (struct unf_lport_s *)scsi_host->hostdata[0]; + fc_host_supported_classes(scsi_host) = FC_COS_CLASS3; /* class_3 */ + fc_host_dev_loss_tmo(scsi_host) = + (unsigned int)unf_get_link_lose_tmo(lport); /* 30s */ + fc_host_node_name(scsi_host) = lport->node_name; + fc_host_port_name(scsi_host) = lport->port_name; + + fc_host_max_npiv_vports(scsi_host) = + (unsigned short)((lport == lport->root_lport) ? + lport->low_level_func.support_max_npiv_num : 0); + fc_host_npiv_vports_inuse(scsi_host) = 0; + fc_host_next_vport_number(scsi_host) = 0; + + /* About speed mode */ + if ((lport->low_level_func.fc_ser_max_speed == UNF_PORT_SPEED_32_G) && + (lport->card_type == UNF_FC_SERVER_BOARD_32_G)) { + speed = FC_PORTSPEED_32GBIT | FC_PORTSPEED_16GBIT | + FC_PORTSPEED_8GBIT; + } else if ((lport->low_level_func.fc_ser_max_speed == + UNF_PORT_SPEED_16_G) && + (lport->card_type == UNF_FC_SERVER_BOARD_16_G)) { + speed = FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT | + FC_PORTSPEED_4GBIT; + } else if ((lport->low_level_func.fc_ser_max_speed == + UNF_PORT_SPEED_8_G) && + (lport->card_type == UNF_FC_SERVER_BOARD_8_G)) { + speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | + FC_PORTSPEED_2GBIT; + } + + fc_host_supported_speeds(scsi_host) = speed; +} + +int unf_alloc_scsi_host(unf_scsi_host_s **v_scsi_host, + struct unf_host_param_s *v_host_param) +{ + int ret = RETURN_ERROR; + struct Scsi_Host *scsi_host = NULL; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x512, UNF_TRUE, v_scsi_host, return RETURN_ERROR); + UNF_CHECK_VALID(0x513, UNF_TRUE, v_host_param, return RETURN_ERROR); + + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[event]Alloc scsi host..."); + + /* Check L_Port validity */ + lport = (struct unf_lport_s *)(v_host_param->lport); + if (unlikely(!lport)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port is NULL and return directly"); + + return RETURN_ERROR; + } + + scsi_host_template.can_queue = v_host_param->can_queue; + scsi_host_template.cmd_per_lun = v_host_param->cmnd_per_lun; + scsi_host_template.sg_tablesize = v_host_param->sg_table_size; + scsi_host_template.max_sectors = v_host_param->max_sectors; + + /* Alloc scsi host */ + scsi_host = scsi_host_alloc(&scsi_host_template, + sizeof(unsigned long long)); + if (unlikely(!scsi_host)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Register scsi host failed"); + + return RETURN_ERROR; + } + + scsi_host->max_channel = v_host_param->max_channel; + scsi_host->max_lun = v_host_param->max_lun; + scsi_host->max_cmd_len = v_host_param->max_cmnd_len; + scsi_host->unchecked_isa_dma = 0; + scsi_host->hostdata[0] = (unsigned long)lport; /* save L_Port to scsi */ + scsi_host->unique_id = scsi_host->host_no; + scsi_host->max_id = v_host_param->max_id; + scsi_host->transportt = (lport == lport->root_lport) ? 
+ scsi_transport_template : scsi_transport_template_v; + + /* register DIF/DIX protection */ + if (hifc_dif_enable) { + /* Enable DIF and DIX function */ + scsi_host_set_prot(scsi_host, hifc_dif_type); + + hifc_guard = SHOST_DIX_GUARD_CRC; + /* Enable IP checksum algorithm in DIX */ + if (dix_flag) + hifc_guard |= SHOST_DIX_GUARD_IP; + scsi_host_set_guard(scsi_host, hifc_guard); + } + + /* Add scsi host */ + ret = scsi_add_host(scsi_host, v_host_param->pdev); + if (unlikely(ret)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Add scsi host failed with return value %d", + ret); + + scsi_host_put(scsi_host); + return RETURN_ERROR; + } + + /* Set scsi host attribute */ + unf_host_init_attr_setting(scsi_host); + *v_scsi_host = scsi_host; + + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[event]Alloc and add scsi host(0x%llx) succeed", + (unsigned long long)scsi_host); + + return RETURN_OK; +} + +void unf_free_scsi_host(unf_scsi_host_s *v_scsi_host) +{ + struct Scsi_Host *scsi_host = NULL; + + scsi_host = v_scsi_host; + fc_remove_host(scsi_host); + scsi_remove_host(scsi_host); + + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[event]Remove scsi host(%d) succeed", scsi_host->host_no); + + scsi_host_put(scsi_host); +} + +static int unf_get_protect_mode(struct unf_lport_s *lport, + struct scsi_cmnd *v_cmnd, + struct unf_scsi_cmd_s *v_scsi_cmnd) +{ + struct scsi_cmnd *cmd = NULL; + int difsegcnt = 0; + struct unf_dif_control_info_s *dif_control_info = NULL; + + cmd = v_cmnd; + dif_control_info = &v_scsi_cmnd->dif_control; + + switch (scsi_get_prot_op(cmd)) { + /* OS-HBA: Unprotected, HBA-Target: Protected */ + case SCSI_PROT_READ_STRIP: + dif_control_info->protect_opcode |= + UNF_DIF_ACTION_VERIFY_AND_DELETE; + break; + case SCSI_PROT_WRITE_INSERT: + dif_control_info->protect_opcode |= + UNF_DIF_ACTION_INSERT; + break; + + /* OS-HBA: Protected, HBA-Target: Unprotected */ + case SCSI_PROT_READ_INSERT: + dif_control_info->protect_opcode |= + UNF_DIF_ACTION_INSERT; + break; + case SCSI_PROT_WRITE_STRIP: + dif_control_info->protect_opcode |= + UNF_DIF_ACTION_VERIFY_AND_DELETE; + break; + + /* OS-HBA: Protected, HBA-Target: Protected */ + case SCSI_PROT_READ_PASS: + case SCSI_PROT_WRITE_PASS: + dif_control_info->protect_opcode |= + UNF_DIF_ACTION_VERIFY_AND_FORWARD; + break; + + default: + dif_control_info->protect_opcode |= + UNF_DIF_ACTION_VERIFY_AND_FORWARD; + break; + } + + if (dif_sgl_mode) + dif_control_info->flags |= UNF_DIF_DOUBLE_SGL; + + dif_control_info->protect_opcode |= + UNF_VERIFY_CRC_MASK | UNF_VERIFY_LBA_MASK; + dif_control_info->dif_sge_count = scsi_prot_sg_count(cmd); + dif_control_info->dif_sgl = scsi_prot_sglist(cmd); + dif_control_info->start_lba = + cpu_to_le32(((uint32_t)(0xffffffff & scsi_get_lba(cmd)))); + + if (scsi_prot_sg_count(cmd)) { + difsegcnt = dma_map_sg(&lport->low_level_func.dev->dev, + scsi_prot_sglist(cmd), + (int)scsi_prot_sg_count(cmd), + cmd->sc_data_direction); + if (unlikely(!difsegcnt)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) cmd:%p map dif sgl err", + lport->port_id, cmd); + return UNF_RETURN_ERROR; + } + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "build scsi cmd:%p op:%d,difsglcount:%d,difsegcnt:%d", + cmd, scsi_get_prot_op(cmd), scsi_prot_sg_count(cmd), + difsegcnt); + return RETURN_OK; +} + +unsigned int unf_get_frame_entry_buf(void *v_up_cmnd, + void *v_driver_sgl, + void **v_upper_sgl, + unsigned int *v_port_id, + unsigned int *v_index, + char **v_buf, + unsigned int 
*v_buf_len) +{ +#define HIFC_1822_MAX_DMA_LENGTH (0x20000 - 1) + struct scatterlist *scsi_sgl = *v_upper_sgl; + + UNF_REFERNCE_VAR(v_up_cmnd); + UNF_REFERNCE_VAR(v_driver_sgl); + UNF_REFERNCE_VAR(v_port_id); + + if (unlikely(!scsi_sgl)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Command(0x%p) can not get SGL.", v_up_cmnd); + return RETURN_ERROR; + } + *v_buf = (char *)sg_dma_address(scsi_sgl); + *v_buf_len = sg_dma_len(scsi_sgl); + *v_upper_sgl = (void *)sg_next(scsi_sgl); + if (unlikely((*v_buf_len > HIFC_1822_MAX_DMA_LENGTH) || + (*v_buf_len == 0))) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Command(0x%p) dmalen:0x%x is not support.", + v_up_cmnd, *v_buf_len); + return RETURN_ERROR; + } + + return RETURN_OK; +} + +static int unf_scsi_queue_cmd(struct Scsi_Host *shost, + struct scsi_cmnd *v_cmnd) +{ + struct Scsi_Host *host = NULL; + struct scsi_cmnd *cmd = NULL; + struct unf_scsi_cmd_s scsi_cmnd = { 0 }; + unsigned int scsi_id = 0; + unsigned int en_scsi_state = 0; + int ret = SCSI_MLQUEUE_HOST_BUSY; + // unsigned int uiError = 0; + struct unf_lport_s *lport = NULL; + struct fc_rport *p_rport = NULL; + struct unf_rport_scsi_id_image_s *scsi_image_table = NULL; + unsigned int ret_value = 0; + struct unf_rport_s *rport = NULL; + unsigned int cmnd_result = 0; + unsigned int rport_state_err = 0; + unsigned int scan_device_cmd = 0; + unsigned long long raw_lun_id = 0; + int data_seg_cnt = 0; + + static atomic64_t ull_count; + host = shost; + cmd = v_cmnd; + UNF_CHECK_VALID(0x515, UNF_TRUE, host, return RETURN_ERROR); + UNF_CHECK_VALID(0x514, UNF_TRUE, cmd, return RETURN_ERROR); + + /* Get L_Port from scsi_cmnd */ + lport = (struct unf_lport_s *)host->hostdata[0]; + if (unlikely(!lport)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Check l_port failed, cmd(%p)", cmd); + + /* scsi_done & return 0 & I/O error */ + cmd->result = DID_NO_CONNECT << 16; + cmd->scsi_done(cmd); + return 0; + } + + /* Check device/session local state by device_id */ + /* local SCSI_ID from device */ + scsi_id = (unsigned int)((unsigned long long)cmd->device->hostdata); + if (unlikely(scsi_id >= UNF_MAX_SCSI_ID)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) scsi_id(0x%x) is max than %d", + lport->port_id, scsi_id, UNF_MAX_SCSI_ID); + + /* scsi_done & return 0 & I/O error */ + cmd->result = DID_NO_CONNECT << 16; + cmd->scsi_done(cmd); + return 0; + } + + scsi_image_table = &lport->rport_scsi_table; + UNF_SCSI_CMD_CNT(scsi_image_table, scsi_id, cmd->cmnd[0]); + + /* Get scsi r_port */ + /*lint -e666 -esym(666,*)*/ + p_rport = starget_to_rport(scsi_target(cmd->device)); + if (unlikely(!p_rport)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) cmd(%p) to get scsi rport failed", + lport->port_id, cmd); + + /* scsi_done & return 0 & I/O error */ + cmd->result = DID_NO_CONNECT << 16; + cmd->scsi_done(cmd); + ret_value = DID_NO_CONNECT; + UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, ret_value); + return 0; + } + + en_scsi_state = atomic_read(&scsi_image_table->wwn_rport_info_table[scsi_id].en_scsi_state); + if (unlikely(en_scsi_state != UNF_SCSI_ST_ONLINE)) { + if (en_scsi_state == UNF_SCSI_ST_OFFLINE) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) scsi_state(0x%x) scsi_id(0x%x) rport(0x%p) target_id(0x%x) cmd(0x%p), target is busy", + lport->port_id, en_scsi_state, scsi_id, + p_rport, p_rport->scsi_target_id, cmd); + + scan_device_cmd = (cmd->cmnd[0] == INQUIRY) || + (cmd->cmnd[0] == REPORT_LUNS); + + 
/* report lun or inquiry cmd, if send failed, + * do not retry, prevent the scan_mutex in + * scsi host locked up by eachother + */ + if (scan_device_cmd) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) host(0x%x) scsi_id(0x%x) lun(0x%llx) cmd(0x%x) DID_NO_CONNECT", + lport->port_id, host->host_no, + scsi_id, + (unsigned long long)cmd->device->lun, + cmd->cmnd[0]); + + cmd->result = DID_NO_CONNECT << 16; + cmd->scsi_done(cmd); + ret_value = DID_NO_CONNECT; + UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, + ret_value); + + return 0; + } + + if (likely(scsi_image_table->wwn_rport_info_table)) { + if (likely(scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter)) + atomic64_inc(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->target_busy); + } + + /* Target busy: need scsi retry */ + return SCSI_MLQUEUE_TARGET_BUSY; + } + /* timeout(DEAD): scsi_done & return 0 & I/O error */ + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) scsi_id(0x%x) rport(0x%p) target_id(0x%x) cmd(0x%p), target is loss timeout", + lport->port_id, scsi_id, p_rport, + p_rport->scsi_target_id, cmd); + cmd->result = DID_NO_CONNECT << 16; + cmd->scsi_done(cmd); + ret_value = DID_NO_CONNECT; + UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, ret_value); + + return 0; + } + + raw_lun_id = ((unsigned long long)cmd->device->lun << 16) & + 0x00000000ffff0000; + if (scsi_sg_count(cmd)) { + data_seg_cnt = dma_map_sg(&lport->low_level_func.dev->dev, + scsi_sglist(cmd), + (int)scsi_sg_count(cmd), + cmd->sc_data_direction); + if (unlikely(!data_seg_cnt)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) scsi_id(0x%x) rport(0x%p) target_id(0x%x) cmd(0x%p), dma map sg err", + lport->port_id, scsi_id, + p_rport, p_rport->scsi_target_id, cmd); + cmd->result = DID_BUS_BUSY << 16; + cmd->scsi_done(cmd); + ret_value = DID_BUS_BUSY; + UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, ret_value); + return SCSI_MLQUEUE_HOST_BUSY; + } + } + + /* Construct local SCSI CMND info */ + /* save host_no to scsi_cmnd->scsi_host_id */ + scsi_cmnd.scsi_host_id = host->host_no; + scsi_cmnd.scsi_id = scsi_id; + scsi_cmnd.lun_id = raw_lun_id; + scsi_cmnd.data_direction = cmd->sc_data_direction; + scsi_cmnd.underflow = cmd->underflow; + scsi_cmnd.cmnd_len = cmd->cmd_len; + scsi_cmnd.pcmnd = cmd->cmnd; + scsi_cmnd.transfer_len = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); + scsi_cmnd.sense_buf_len = SCSI_SENSE_DATA_LEN; + scsi_cmnd.sense_buf = cmd->sense_buffer; + scsi_cmnd.time_out = 0; + scsi_cmnd.upper_cmnd = cmd; + scsi_cmnd.drv_private = + (void *)(*(unsigned long long *)shost_priv(host)); + scsi_cmnd.entry_count = data_seg_cnt; + scsi_cmnd.sgl = scsi_sglist(cmd); + scsi_cmnd.pfn_unf_ini_get_sgl_entry = unf_get_frame_entry_buf; + scsi_cmnd.pfn_done = unf_scsi_done; + scsi_cmnd.pc_lun_id = (unsigned char *)&scsi_cmnd.lun_id; + scsi_cmnd.err_code_table_cout = ini_err_code_table_cnt1; + scsi_cmnd.err_code_table = ini_error_code_table1; + scsi_cmnd.world_id = 0xfffffffc; + scsi_cmnd.cmnd_sn = atomic64_inc_return(&ull_count); + if (unlikely(scsi_cmnd.cmnd_sn == 0)) + scsi_cmnd.cmnd_sn = atomic64_inc_return(&ull_count); + + if ((scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) && + hifc_dif_enable) { + ret = unf_get_protect_mode(lport, cmd, &scsi_cmnd); + if (ret != RETURN_OK) { + cmd->result = DID_BUS_BUSY << 16; + cmd->scsi_done(cmd); + ret_value = DID_BUS_BUSY; + UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, + ret_value); + scsi_dma_unmap(cmd); + return SCSI_MLQUEUE_HOST_BUSY; + } + } + + 
UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) host(0x%x) scsi_id(0x%x) lun(0x%llx) transfer length(0x%x) cmd_len(0x%x) direction(0x%x) cmd(0x%x) under_flow(0x%x)", + lport->port_id, host->host_no, scsi_id, + (unsigned long long)cmd->device->lun, + scsi_cmnd.transfer_len, + scsi_cmnd.cmnd_len, cmd->sc_data_direction, + scsi_cmnd.pcmnd[0], scsi_cmnd.underflow); + /* Bind the Exchange address corresponding to scsi_cmnd to + * scsi_cmnd->host_scribble + */ + cmd->host_scribble = (unsigned char *)scsi_cmnd.cmnd_sn; + ret = unf_cm_queue_command(&scsi_cmnd); + if (ret != RETURN_OK) { + rport = unf_find_rport_by_scsi_id(lport, + ini_error_code_table1, + ini_err_code_table_cnt1, + scsi_id, + &cmnd_result); + rport_state_err = (!rport) || + (rport->lport_ini_state != + UNF_PORT_STATE_LINKUP) || + (rport->rp_state == UNF_RPORT_ST_CLOSING); + scan_device_cmd = (cmd->cmnd[0] == INQUIRY) || + (cmd->cmnd[0] == REPORT_LUNS); + + /* report lun or inquiry cmd if send failed, do not retry, + * prevent the scan_mutex in scsi host locked up by eachother + */ + if (rport_state_err && scan_device_cmd) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) host(0x%x) scsi_id(0x%x) lun(0x%llx) cmd(0x%x) cmResult(0x%x) DID_NO_CONNECT", + lport->port_id, host->host_no, scsi_id, + (unsigned long long)cmd->device->lun, + cmd->cmnd[0], cmnd_result); + + cmd->result = DID_NO_CONNECT << 16; + cmd->scsi_done(cmd); + ret_value = DID_NO_CONNECT; + UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, ret_value); + scsi_dma_unmap(cmd); + unf_unmap_prot_sgl(cmd); + return 0; + } + + /* Host busy: scsi need to retry */ + ret = SCSI_MLQUEUE_HOST_BUSY; + if (likely(scsi_image_table->wwn_rport_info_table)) { + if (likely(scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter)) + atomic64_inc(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->host_busy); + } + scsi_dma_unmap(cmd); + unf_unmap_prot_sgl(cmd); + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) return(0x%x) to process INI IO falid", + lport->port_id, ret); + } + return ret; +} + +static int unf_scsi_abort_scsi_cmnd(struct scsi_cmnd *v_cmnd) +{ + /* SCSI ABORT Command --->>> FC ABTS */ + struct unf_scsi_cmd_s scsi_cmnd = { 0 }; + struct Scsi_Host *scsi_host = NULL; + int ret = FAILED; + struct unf_rport_scsi_id_image_s *scsi_image_table = NULL; + struct unf_lport_s *lport = NULL; + unsigned int scsi_id = 0; + unsigned int err_handle = 0; + + UNF_CHECK_VALID(0x516, UNF_TRUE, v_cmnd, return FAILED); + + lport = (struct unf_lport_s *)v_cmnd->device->host->hostdata[0]; + scsi_id = (unsigned int)((unsigned long long)v_cmnd->device->hostdata); + + if (unf_is_lport_valid(lport) == RETURN_OK) { + scsi_image_table = &lport->rport_scsi_table; + err_handle = UNF_SCSI_ABORT_IO_TYPE; + UNF_SCSI_ERROR_HANDLE_CNT(scsi_image_table, + scsi_id, err_handle); + + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[abort]Port(0x%x) scsi_id(0x%x) lun_id(0x%x) cmnd_type(0x%x)", + lport->port_id, scsi_id, + (unsigned int)v_cmnd->device->lun, + v_cmnd->cmnd[0]); + } else { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Lport(%p) is moving or null", lport); + + return UNF_SCSI_ABORT_FAIL; + } + + /* Check local SCSI_ID validity */ + if (unlikely(scsi_id >= UNF_MAX_SCSI_ID)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]scsi_id(0x%x) is max than(0x%x)", + scsi_id, UNF_MAX_SCSI_ID); + + return UNF_SCSI_ABORT_FAIL; + } + + /* Block scsi (check rport state -> whether offline or not) */ + ret = 
fc_block_scsi_eh(v_cmnd); + if (unlikely(ret != 0)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Block scsi eh failed(0x%x)", ret); + + return ret; + } + + scsi_host = v_cmnd->device->host; + scsi_cmnd.scsi_host_id = scsi_host->host_no; // L_Port ID + scsi_cmnd.scsi_id = scsi_id; // R_Port ID (Target ID) + scsi_cmnd.lun_id = (unsigned long long)v_cmnd->device->lun; // LUN ID + scsi_cmnd.upper_cmnd = v_cmnd; // scsi_cmnd + // L_Port + scsi_cmnd.drv_private = + (void *)(*(unsigned long long *)shost_priv(scsi_host)); + scsi_cmnd.cmnd_sn = (unsigned long long)(v_cmnd->host_scribble); + scsi_cmnd.pc_lun_id = (unsigned char *)&scsi_cmnd.lun_id; + scsi_cmnd.pfn_done = unf_scsi_done; + scsi_cmnd.world_id = 0xfffffffc; + /* Process scsi Abort cmnd */ + ret = unf_cm_eh_abort_handler(&scsi_cmnd); + if (ret == UNF_SCSI_ABORT_SUCCESS) { + if (unf_is_lport_valid(lport) == RETURN_OK) { + scsi_image_table = &lport->rport_scsi_table; + err_handle = UNF_SCSI_ABORT_IO_TYPE; + UNF_SCSI_ERROR_HANDLE_RESULT_CNT(scsi_image_table, + scsi_id, err_handle); + } + } + + return ret; +} + +static int unf_scsi_device_reset_handler(struct scsi_cmnd *v_cmnd) +{ + /* LUN reset */ + struct unf_scsi_cmd_s scsi_cmnd = { 0 }; + struct Scsi_Host *scsi_host = NULL; + struct unf_rport_scsi_id_image_s *scsi_image_table = NULL; + int ret = FAILED; + struct unf_lport_s *lport = NULL; + unsigned int scsi_id = 0; + unsigned int err_handle = 0; + + UNF_CHECK_VALID(0x517, UNF_TRUE, v_cmnd, return FAILED); + + lport = (struct unf_lport_s *)v_cmnd->device->host->hostdata[0]; + if (unf_is_lport_valid(lport) == RETURN_OK) { + scsi_image_table = &lport->rport_scsi_table; + err_handle = UNF_SCSI_DEVICE_RESET_TYPE; + UNF_SCSI_ERROR_HANDLE_CNT(scsi_image_table, + scsi_id, err_handle); + + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[device_reset]Port(0x%x) scsi_id(0x%x) lun_id(0x%x) cmnd_type(0x%x)", + lport->port_id, scsi_id, + (unsigned int)v_cmnd->device->lun, + v_cmnd->cmnd[0]); + } else { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port is invalid"); + + return FAILED; + } + + /* Check local SCSI_ID validity */ + scsi_id = (unsigned int)((unsigned long long)v_cmnd->device->hostdata); + if (unlikely(scsi_id >= UNF_MAX_SCSI_ID)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]scsi_id(0x%x) is max than(0x%x)", + scsi_id, UNF_MAX_SCSI_ID); + + return FAILED; + } + + /* Block scsi (check rport state -> whether offline or not) */ + ret = fc_block_scsi_eh(v_cmnd); + if (unlikely(ret != 0)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Block scsi eh failed(0x%x)", ret); + + return ret; + } + + scsi_host = v_cmnd->device->host; + scsi_cmnd.scsi_host_id = scsi_host->host_no; /* l_port id */ + scsi_cmnd.scsi_id = scsi_id; /* r_port id */ + scsi_cmnd.lun_id = (unsigned long long)v_cmnd->device->lun; /* lun id */ + scsi_cmnd.upper_cmnd = v_cmnd; /* scsi_cmnd */ + /* l_port */ + scsi_cmnd.drv_private = + (void *)(*(unsigned long long *)shost_priv(scsi_host)); + scsi_cmnd.pc_lun_id = (unsigned char *)&scsi_cmnd.lun_id; /* lun id */ + + /* Process scsi device/LUN reset cmnd */ + ret = unf_cm_eh_device_reset_handler(&scsi_cmnd); + if (ret == UNF_SCSI_ABORT_SUCCESS) { + if (unf_is_lport_valid(lport) == RETURN_OK) { + scsi_image_table = &lport->rport_scsi_table; + err_handle = UNF_SCSI_DEVICE_RESET_TYPE; + UNF_SCSI_ERROR_HANDLE_RESULT_CNT(scsi_image_table, + scsi_id, + err_handle); + } + } + + return ret; +} + +static int unf_scsi_bus_reset_handler(struct scsi_cmnd *v_cmnd) +{ + /* BUS Reset */ + 
struct unf_scsi_cmd_s scsi_cmnd = { 0 }; + struct unf_lport_s *lport = NULL; + struct Scsi_Host *scsi_host = NULL; + struct unf_rport_scsi_id_image_s *scsi_image_table = NULL; + int ret = FAILED; + unsigned int scsi_id = 0; + unsigned int err_handle = 0; + + UNF_CHECK_VALID(0x517, UNF_TRUE, v_cmnd, return FAILED); + + lport = (struct unf_lport_s *)v_cmnd->device->host->hostdata[0]; + if (unlikely(!lport)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port is null"); + + return FAILED; + } + + /* Check local SCSI_ID validity */ + scsi_id = (unsigned int)((unsigned long long)v_cmnd->device->hostdata); + if (unlikely(scsi_id >= UNF_MAX_SCSI_ID)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]scsi_id(0x%x) is max than(0x%x)", + scsi_id, UNF_MAX_SCSI_ID); + + return FAILED; + } + + if (unf_is_lport_valid(lport) == RETURN_OK) { + scsi_image_table = &lport->rport_scsi_table; + err_handle = UNF_SCSI_BUS_RESET_TYPE; + UNF_SCSI_ERROR_HANDLE_CNT(scsi_image_table, + scsi_id, err_handle); + + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info][bus_reset]Port(0x%x) scsi_id(0x%x) lun_id(0x%x) cmnd_type(0x%x)", + lport->port_id, scsi_id, + (unsigned int)v_cmnd->device->lun, + v_cmnd->cmnd[0]); + } + + /* Block scsi (check rport state -> whether offline or not) */ + ret = fc_block_scsi_eh(v_cmnd); + if (unlikely(ret != 0)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Block scsi eh failed(0x%x)", ret); + + return ret; + } + + scsi_host = v_cmnd->device->host; + scsi_cmnd.scsi_host_id = scsi_host->host_no; /* l_port id */ + scsi_cmnd.scsi_id = scsi_id; /* r_port id */ + scsi_cmnd.lun_id = (unsigned long long)v_cmnd->device->lun; /* lun id */ + scsi_cmnd.upper_cmnd = v_cmnd; /* scsi_cmnd */ + /* l_port */ + scsi_cmnd.drv_private = + (void *)(*(unsigned long long *)shost_priv(scsi_host)); + scsi_cmnd.pc_lun_id = (unsigned char *)&scsi_cmnd.lun_id; /* lun id */ + + /* Process scsi BUS Reset cmnd */ + ret = unf_cm_bus_reset_handler(&scsi_cmnd); + if (ret == UNF_SCSI_ABORT_SUCCESS) { + if (unf_is_lport_valid(lport) == RETURN_OK) { + scsi_image_table = &lport->rport_scsi_table; + err_handle = UNF_SCSI_BUS_RESET_TYPE; + UNF_SCSI_ERROR_HANDLE_RESULT_CNT(scsi_image_table, + scsi_id, + err_handle); + } + } + + return ret; +} + +static int unf_scsi_target_reset_handler(struct scsi_cmnd *v_cmnd) +{ + /* Session reset/delete */ + struct unf_scsi_cmd_s scsi_cmnd = { 0 }; + struct Scsi_Host *scsi_host = NULL; + struct unf_rport_scsi_id_image_s *scsi_image_table = NULL; + int ret = FAILED; + struct unf_lport_s *lport = NULL; + unsigned int scsi_id = 0; + unsigned int err_handle = 0; + + UNF_CHECK_VALID(0x517, UNF_TRUE, v_cmnd, return RETURN_ERROR); + + lport = (struct unf_lport_s *)v_cmnd->device->host->hostdata[0]; + if (unlikely(!lport)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port is null"); + + return FAILED; + } + + /* Check local SCSI_ID validity */ + scsi_id = (unsigned int)((unsigned long long)v_cmnd->device->hostdata); + if (unlikely(scsi_id >= UNF_MAX_SCSI_ID)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]scsi_id(0x%x) is max than(0x%x)", + scsi_id, UNF_MAX_SCSI_ID); + + return FAILED; + } + + if (unf_is_lport_valid(lport) == RETURN_OK) { + scsi_image_table = &lport->rport_scsi_table; + err_handle = UNF_SCSI_TARGET_RESET_TYPE; + UNF_SCSI_ERROR_HANDLE_CNT(scsi_image_table, scsi_id, + err_handle); + + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[target_reset]Port(0x%x) scsi_id(0x%x) lun_id(0x%x) cmnd_type(0x%x)", + lport->port_id, 
scsi_id, + (unsigned int)v_cmnd->device->lun, + v_cmnd->cmnd[0]); + } + + /* Block scsi (check rport state -> whether offline or not) */ + ret = fc_block_scsi_eh(v_cmnd); + if (unlikely(ret != 0)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Block scsi eh failed(0x%x)", ret); + + return ret; + } + + scsi_host = v_cmnd->device->host; + scsi_cmnd.scsi_host_id = scsi_host->host_no; /* l_port id */ + scsi_cmnd.scsi_id = scsi_id; /* r_port id */ + scsi_cmnd.lun_id = (unsigned long long)v_cmnd->device->lun; /* lun id */ + scsi_cmnd.upper_cmnd = v_cmnd; /* scsi_cmnd */ + /* l_port */ + scsi_cmnd.drv_private = + (void *)(*(unsigned long long *)shost_priv(scsi_host)); + scsi_cmnd.pc_lun_id = (unsigned char *)&scsi_cmnd.lun_id; /* lun id */ + + /* Process scsi Target/Session reset/delete cmnd */ + ret = unf_cm_target_reset_handler(&scsi_cmnd); + if (ret == UNF_SCSI_ABORT_SUCCESS) { + if (unf_is_lport_valid(lport) == RETURN_OK) { + scsi_image_table = &lport->rport_scsi_table; + err_handle = UNF_SCSI_TARGET_RESET_TYPE; + UNF_SCSI_ERROR_HANDLE_RESULT_CNT(scsi_image_table, + scsi_id, err_handle); + } + } + + return ret; +} + +static int unf_scsi_slave_alloc(struct scsi_device *sdev) +{ + /*lint -e666 -esym(666,*)*/ + struct fc_rport *rport = NULL; + unsigned int scsi_id = 0; + struct unf_lport_s *lport = NULL; + struct Scsi_Host *host = NULL; + struct unf_rport_scsi_id_image_s *scsi_image_table = NULL; + + /* About device */ + if (unlikely(!sdev)) { + UNF_TRACE(0x4101, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]SDev is null"); + + return -ENXIO; + } + + /* About scsi rport */ + rport = starget_to_rport(scsi_target(sdev)); + if (unlikely(!rport || fc_remote_port_chkready(rport))) { + UNF_TRACE(0x4101, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]SCSI rport is null"); + + if (rport) { + UNF_TRACE(0x4101, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]SCSI rport is not ready(0x%x)", + fc_remote_port_chkready(rport)); + } + + return -ENXIO; + } + + /* About host */ + host = rport_to_shost(rport); + if (unlikely(!host)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Host is null"); + + return -ENXIO; + } + + /* About Local Port */ + lport = (struct unf_lport_s *)host->hostdata[0]; + if (unf_is_lport_valid(lport) != RETURN_OK) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port is invalid"); + + return -ENXIO; + } + + /* About Local SCSI_ID */ + /* use local SCSI_ID to alloc slave device */ + scsi_id = *(unsigned int *)rport->dd_data; + if (unlikely(scsi_id >= UNF_MAX_SCSI_ID)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]scsi_id(0x%x) is max than(0x%x)", + scsi_id, UNF_MAX_SCSI_ID); + + return -ENXIO; + } + + scsi_image_table = &lport->rport_scsi_table; + if (scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter) + atomic_inc(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->device_alloc); + + atomic_inc(&lport->device_alloc); + /* save local SCSI_ID */ + sdev->hostdata = (void *)(unsigned long long)scsi_id; + + UNF_TRACE(0x4101, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[event]Port(0x%x) use scsi_id(%d) to alloc device[%u:%u:%u:%u]", + lport->port_id, scsi_id, host->host_no, + sdev->channel, sdev->id, (unsigned int)sdev->lun); + + return 0; +} + +static void unf_scsi_destroy_slave(struct scsi_device *sdev) +{ + /* + * NOTE: about sdev->hostdata + * --->>> pointing to local SCSI_ID + * 1. Assignment during slave allocation + * 2. Released when callback for slave destroy + * 3. 
Used during: Queue_CMND, Abort CMND, Device Reset, + * Target Reset & Bus Reset + */ + /*lint -e666 -esym(666,*)*/ + struct fc_rport *rport = NULL; + unsigned int scsi_id = 0; + struct unf_lport_s *lport = NULL; + struct Scsi_Host *host = NULL; + struct unf_rport_scsi_id_image_s *scsi_image_table = NULL; + + /* About scsi device */ + if (unlikely(!sdev)) { + UNF_TRACE(0x4101, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]SDev is null"); + return; + } + + /* About scsi rport */ + rport = starget_to_rport(scsi_target(sdev)); + if (unlikely(!rport)) { + UNF_TRACE(0x4101, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]SCSI rport is null or remote port is not ready"); + return; + } + + /* About host */ + host = rport_to_shost(rport); + if (unlikely(!host)) { + UNF_TRACE(0x3808, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Host is null"); + return; + } + + /* About L_Port */ + lport = (struct unf_lport_s *)host->hostdata[0]; + if (unf_is_lport_valid(lport) == RETURN_OK) { + scsi_image_table = &lport->rport_scsi_table; + atomic_inc(&lport->device_destroy); + + scsi_id = (unsigned int)((unsigned long long)sdev->hostdata); + if ((scsi_id < UNF_MAX_SCSI_ID) && + (scsi_image_table->wwn_rport_info_table)) { + if (scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter) + atomic_inc(&scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter->device_destroy); + + UNF_TRACE(0x4101, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[event]Port(0x%x) with scsi_id(%d) to destroy slave device[%u:%u:%u:%u]", + lport->port_id, scsi_id, host->host_no, + sdev->channel, sdev->id, + (unsigned int)sdev->lun); + } else { + UNF_TRACE(0x4101, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[err]Port(0x%x) scsi_id(%d) is invalid and destroy device[%u:%u:%u:%u]", + lport->port_id, scsi_id, host->host_no, + sdev->channel, sdev->id, + (unsigned int)sdev->lun); + } + } else { + UNF_TRACE(0x3097, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(%p) is invalid", lport); + } + + sdev->hostdata = NULL; /* reset local SCSI_ID */ +} + +static int unf_scsi_slave_configure(struct scsi_device *sdev) +{ +#define UNF_SCSI_DEV_DEPTH 32 + blk_queue_update_dma_alignment(sdev->request_queue, 0x7); + scsi_change_queue_depth(sdev, UNF_SCSI_DEV_DEPTH); + UNF_TRACE(0x4101, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[event]Enter slave configure, set depth is %d, sdev->tagged_supported is (%d)", + UNF_SCSI_DEV_DEPTH, sdev->tagged_supported); + + return 0; +} + +static int unf_scsi_scan_finished(struct Scsi_Host *shost, unsigned long time) +{ + UNF_TRACE(0x4101, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[event]Scan finished"); + + return 1; +} + +static void unf_scsi_scan_start(struct Scsi_Host *shost) +{ + UNF_TRACE(0x4101, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[event]Start scsi scan..."); +} + +unsigned int unf_register_ini_transport(void) +{ + /* Register INI Transport */ + scsi_transport_template = fc_attach_transport(&function_template); + + if (!scsi_transport_template) { + UNF_TRACE(0x4101, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Register FC transport to scsi failed"); + + return RETURN_ERROR; + } + + scsi_transport_template_v = fc_attach_transport(&function_template_v); + if (!scsi_transport_template_v) { + UNF_TRACE(0x4101, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Register FC vport transport to scsi failed"); + + fc_release_transport(scsi_transport_template); + + return RETURN_ERROR; + } + + UNF_TRACE(0x4101, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[event]Register FC transport to scsi succeed"); + + return RETURN_OK; +} + +void unf_unregister_ini_transport(void) +{ + fc_release_transport(scsi_transport_template); + 
fc_release_transport(scsi_transport_template_v); + UNF_TRACE(0x4101, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[event]Unregister FC transport succeed"); +} + +void unf_report_io_dm_event(void *v_lport, unsigned int type, + unsigned int value) +{ +} + +void unf_save_sense_data(void *scsicmd, const char *sense, int senslen) +{ + struct scsi_cmnd *cmd; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, scsicmd, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, sense, return); + + cmd = (struct scsi_cmnd *)scsicmd; + memcpy(cmd->sense_buffer, sense, senslen); +} diff --git a/drivers/scsi/huawei/hifc/unf_scsi_common.h b/drivers/scsi/huawei/hifc/unf_scsi_common.h new file mode 100644 index 000000000000..59580ad2e63e --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_scsi_common.h @@ -0,0 +1,1136 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __UNF_SCSI_COMMON__ +#define __UNF_SCSI_COMMON__ + +#include "unf_log.h" +#include "hifc_knl_adp.h" + +#define DRV_ISCSI_NAME 223 + +#define SCSI_SENSE_DATA_LEN 96 + +#define DRV_SCSI_CDB_LEN 16 +#define DRV_SCSI_LUN_LEN 8 +#define DRV_PORTID_NUM 32 + +#ifndef SUCCESS +#define SUCCESS 0x2002 +#endif + +#ifndef FAILED +#define FAILED 0x2003 +#endif + +#ifndef FC_PORTSPEED_32GBIT +#define FC_PORTSPEED_32GBIT 0x40 +#endif + +/* + * FCTL defines (FrameHdr.Type_Fctl) + */ +#define FC_EXCHANGE_RESPONDER 0x00800000 +#define FC_LAST_SEQUENCE 0x00100000 +#define FC_END_SEQUENCE 0x00080000 +#define FC_SEQUENCE_INITIATIVE 0x00010000 + +/* + * FCTL common use defines + */ +#define FC_FCTL_RSP (FC_EXCHANGE_RESPONDER | FC_LAST_SEQUENCE | \ + FC_END_SEQUENCE) + +#define UNF_GID_PORT_CNT 2048 +#define UNF_RSCN_PAGE_SUM 255 + +#define UNF_CPU_ENDIAN + +#define UNF_NPORTID_MASK 0x00FFFFFF +#define UNF_DOMAIN_MASK 0x00FF0000 +#define UNF_AREA_MASK 0x0000FF00 +#define UNF_ALPA_MASK 0x000000FF + +#define UNF_NPORTID_WELLKNOWN_MASK 0x00fffff0 + +#define UNF_SCSI_ABORT_SUCCESS SUCCESS +#define UNF_SCSI_ABORT_FAIL FAILED + +#define UNF_SCSI_STATUS(byte) (byte) +#define UNF_SCSI_MSG(byte) ((byte) << 8) +#define UNF_SCSI_HOST(byte) ((byte) << 16) +#define UNF_SCSI_DRIVER(byte) ((byte) << 24) + +#define UNF_GET_SCSI_HOST_ID(scsi_host) ((scsi_host)->host_no) + +struct unf_fchead_s { + /* Routing control and Destination address of the seq */ + unsigned int rctl_did; + /* Class control and Source address of the sequence */ + unsigned int csctl_sid; + /* Data type and Initial frame control value of the seq */ + unsigned int type_fctl; + /* Seq ID, Data Field and Initial seq count */ + unsigned int seq_id_dfctl_seq_cnt; + /* Originator & Responder exchange IDs for the sequence */ + unsigned int oxid_rxid; + /* Relative offset of the first frame of the sequence */ + unsigned int parameter; +}; + +#define UNF_FCPRSP_CTL_LEN (24) +#define UNF_MAX_RSP_INFO_LEN (8) +#define UNF_RSP_LEN_VLD (1 << 0) +#define UNF_SENSE_LEN_VLD (1 << 1) +#define UNF_RESID_OVERRUN (1 << 2) +#define UNF_RESID_UNDERRUN (1 << 3) + +/* T10: FCP2r.07 9.4.1 Overview and format of FCP_RSP IU */ +struct unf_fcprsp_iu_s { + unsigned int ui_reserved[2]; + unsigned char uc_reserved[2]; + unsigned char control; + unsigned char fcp_status; + unsigned int fcp_residual; + unsigned int fcp_sense_len; /* Length of sense info field */ + /* Length of response info field in bytes 0,4 or 8 */ + unsigned int fcp_response_len; + /* Buffer for response info */ + unsigned char fcp_rsp_info[UNF_MAX_RSP_INFO_LEN]; + /* Buffer for sense info */ + 
unsigned char fcp_sense_info[SCSI_SENSE_DATA_LEN]; +} __attribute__((packed)); + +#define UNF_CMD_REF_MASK 0xFF000000 +#define UNF_TASK_ATTR_MASK 0x00070000 +#define UNF_TASK_MGMT_MASK 0x0000FF00 +#define UNF_FCP_WR_DATA 0x00000001 +#define UNF_FCP_RD_DATA 0x00000002 +#define UNF_CDB_LEN_MASK 0x0000007C +#define UNF_FCP_CDB_LEN_16 (16) +#define UNF_FCP_CDB_LEN_32 (32) +#define UNF_FCP_LUNID_LEN_8 (8) + +/* FCP-4 :Table 27 - RSP_CODE field */ +#define UNF_FCP_TM_RSP_COMPLETE (0) +#define UNF_FCP_TM_INVALID_CMND (0x2) +#define UNF_FCP_TM_RSP_REJECT (0x4) +#define UNF_FCP_TM_RSP_FAIL (0x5) +#define UNF_FCP_TM_RSP_SUCCEED (0x8) +#define UNF_FCP_TM_RSP_INCRECT_LUN (0x9) + +#define UNF_SET_TASK_MGMT_FLAGS(v_fcp_tm_code) ((v_fcp_tm_code) << 8) +#define UNF_GET_TASK_MGMT_FLAGS(v_control) \ + (((v_control) & UNF_TASK_MGMT_MASK) >> 8) + +enum unf_task_mgmt_cmnd_e { + UNF_FCP_TM_QUERY_TASK_SET = (1 << 0), + UNF_FCP_TM_ABORT_TASK_SET = (1 << 1), + UNF_FCP_TM_CLEAR_TASK_SET = (1 << 2), + UNF_FCP_TM_QUERY_UNIT_ATTENTION = (1 << 3), + UNF_FCP_TM_LOGICAL_UNIT_RESET = (1 << 4), + UNF_FCP_TM_TARGET_RESET = (1 << 5), + UNF_FCP_TM_CLEAR_ACA = (1 << 6), + UNF_FCP_TM_TERMINATE_TASK = (1 << 7) /* obsolete */ +}; + +struct unf_fcp_cmnd_s { + unsigned char lun[UNF_FCP_LUNID_LEN_8]; /* Logical unit number */ + + unsigned int control; /* Control field : + * uint8_t cmnd_ref; + * uint8_t task_attr:3; + * uint8_t reserved:5; + * uint8_t task_mgmt_flags; + * uint8_t wrdata:1; + * uint8_t rddata:1; + * uint8_t add_cdb_len:6; + */ + /* Payload data containing cdb info */ + unsigned char cdb[UNF_FCP_CDB_LEN_16]; + /* Number of bytes expected to be transferred */ + unsigned int data_length; +} __attribute__((packed)); + +struct unf_fcp_cmd_hdr_s { + struct unf_fchead_s frame_hdr; /* FCHS structure */ + struct unf_fcp_cmnd_s fcp_cmnd; /* Fcp Cmnd struct */ +}; + +/* + * parameter struct + */ + +/* Common Services Parameter used for returning Fabric + * parameters. See FC-FS Rev. 1.90, FC-PH-3 Rev. 9.4 and see FC-DA 3.1. + * This is the structure that is used to enquire Fabric parameters + * after a Fabric login is successful. The fileds in this structure + * are relevant for FLOGI ACC. 
+ */ + +/* FC-LS-2 Table 140 Common Service Parameter applicability */ +struct unf_fabric_coparms_s { +#if defined(UNF_CPU_ENDIAN) + unsigned int bb_credit : 16; /* 0 [0-15] */ + unsigned int lowest_version : 8; /* 0 [16-23] */ + unsigned int highest_version : 8; /* 0 [24-31] */ +#else + unsigned int highest_version : 8; /* 0 [24-31] */ + unsigned int lowest_version : 8; /* 0 [16-23] */ + unsigned int bb_credit : 16; /* 0 [0-15] */ +#endif + + /* Word1 Common Features */ +#if defined(UNF_CPU_ENDIAN) + unsigned int bb_receive_data_field_size : 12; /* 1 [0-11] */ + unsigned int bb_scn : 4; /* 1 [12-15] */ + unsigned int payload_length : 1; /* 1 [16] */ + unsigned int seq_cnt : 1; /* 1 [17] */ + unsigned int dynamic_half_duplex : 1; /* 1 [18] */ + unsigned int r_t_tov : 1; /* 1 [19] */ + unsigned int reserved_co2 : 6; /* 1 [20-25] */ + unsigned int e_d_tov_resolution : 1; /* 1 [26] */ + unsigned int alternate_bb_credit_mgmt : 1; /* 1 [27] */ + unsigned int n_port : 1; /* 1 [28] */ + unsigned int mnid_assignment : 1; /* 1 [29] */ + unsigned int random_relative_offset : 1; /* 1 [30] */ + unsigned int clean_address : 1; /* 1 [31] */ +#else + unsigned int reserved_co22 : 2; /* 1 [24-25] */ + unsigned int e_d_tov_resolution : 1; /* 1 [26] */ + unsigned int alternate_bb_credit_mgmt : 1; /* 1 [27] */ + unsigned int n_port : 1; /* 1 [28] */ + unsigned int mnid_assignment : 1; /* 1 [29] */ + unsigned int random_relative_offset : 1; /* 1 [30] */ + unsigned int clean_address : 1; /* 1 [31] */ + + unsigned int payload_length : 1; /* 1 [16] */ + unsigned int seq_cnt : 1; /* 1 [17] */ + unsigned int dynamic_half_duplex : 1; /* 1 [18] */ + unsigned int r_t_tov : 1; /* 1 [19] */ + unsigned int reserved_co25 : 4; /* 1 [20-23] */ + + unsigned int bb_receive_data_field_size : 12; /* 1 [0-11] */ + unsigned int bb_scn : 4; /* 1 [12-15] */ +#endif + unsigned int r_a_tov; /* 2 [0-31] */ + unsigned int e_d_tov; /* 3 [0-31] */ +}; + +/* + * Common Services Parameter 16 byte structure. + * See FC-PH 4.3 Section 23.6.3, FC-PLDA Section 5.2 and + * TachLite Users Manual 3.24.1 + * the structure does not need to be packed. 
+ */ + +/* FC-LS-2 Table 140 Common Service Parameter applicability */ +/* Table 142 Common Service Parameters - PLOGI and PLOGI LS_ACC */ +struct unf_lgn_port_coparms_s { +#if defined(UNF_CPU_ENDIAN) + unsigned int bb_credit : 16; /* 0 [0-15] */ + unsigned int lowest_version : 8; /* 0 [16-23] */ + unsigned int highest_version : 8; /* 0 [24-31] */ +#else + unsigned int highest_version : 8; /* 0 [24-31] */ + unsigned int lowest_version : 8; /* 0 [16-23] */ + unsigned int bb_credit : 16; /* 0 [0-15] */ +#endif + +#if defined(UNF_CPU_ENDIAN) + unsigned int bb_receive_data_field_size : 12; /* 1 [0-11] */ + unsigned int bb_scn : 4; /* 1 [12-15] */ + unsigned int payload_length : 1; /* 1 [16] */ + unsigned int seq_cnt : 1; /* 1 [17] */ + unsigned int dynamic_half_duplex : 1; /* 1 [18] */ + unsigned int reserved_co2 : 7; /* 1 [19-25] */ + unsigned int e_d_tov_resolution : 1; /* 1 [26] */ + unsigned int alternate_bb_credit_mgmt : 1; /* 1 [27] */ + unsigned int n_port : 1; /* 1 [28] */ + unsigned int vendor_version_level : 1; /* 1 [29] */ + unsigned int random_relative_offset : 1; /* 1 [30] */ + unsigned int continuously_increasing : 1; /* 1 [31] */ +#else + unsigned int reserved_co22 : 2; /* 1 [24-25] */ + unsigned int e_d_tov_resolution : 1; /* 1 [26] */ + unsigned int alternate_bb_credit_mgmt : 1; /* 1 [27] */ + unsigned int n_port : 1; /* 1 [28] */ + unsigned int vendor_version_level : 1; /* 1 [29] */ + unsigned int random_relative_offset : 1; /* 1 [30] */ + unsigned int continuously_increasing : 1; /* 1 [31] */ + + unsigned int payload_length : 1; /* 1 [16] */ + unsigned int seq_cnt : 1; /* 1 [17] */ + unsigned int dynamic_half_duplex : 1; /* 1 [18] */ + unsigned int reserved_co25 : 5; /* 1 [19-23] */ + + unsigned int bb_receive_data_field_size : 12; /* 1 [0-11] */ + unsigned int reserved_co1 : 4; /* 1 [12-15] */ +#endif + +#if defined(UNF_CPU_ENDIAN) + unsigned int relative_offset : 16; /* 2 [0-15] */ + unsigned int nport_total_concurrent_sequences : 16; /* 2 [16-31] */ +#else + unsigned int nport_total_concurrent_sequences : 16; /* 2 [16-31] */ + unsigned int relative_offset : 16; /* 2 [0-15] */ +#endif + + unsigned int e_d_tov; +}; + +/* + * Class services 16 byte structure. 
See FC-PH 4.3 Section 23.6.8 and + * FC-PLDA Section 5.3 + * the structure does not need to be packed + */ + +/* FC-LS-2 Table 145 Class Service Parameters Applicability */ +struct unf_lgn_port_clparms_s { +#if defined(UNF_CPU_ENDIAN) + unsigned int reserved_cl1 : 6; /* 0 [0-5] */ + unsigned int ic_data_compression_history_buffer_size : 2; /* 0 [6-7] */ + unsigned int ic_data_compression_capable : 1; /* 0 [8] */ + + unsigned int ic_ack_generation_assistance : 1; /* 0 [9] */ + unsigned int ic_ack_n_capable : 1; /* 0 [10] */ + unsigned int ic_ack_o_capable : 1; /* 0 [11] */ + /* 0 [12-13] */ + unsigned int ic_initial_responder_processes_accociator : 2; + unsigned int ic_x_id_reassignment : 2; /* 0 [14-15] */ + + unsigned int reserved_cl2 : 7; /* 0 [16-22] */ + unsigned int priority : 1; /* 0 [23] */ + unsigned int buffered_class : 1; /* 0 [24] */ + unsigned int camp_on : 1; /* 0 [25] */ + unsigned int dedicated_simplex : 1; /* 0 [26] */ + unsigned int sequential_delivery : 1; /* 0 [27] */ + unsigned int stacked_connect_request : 2; /* 0 [28-29] */ + unsigned int intermix_mode : 1; /* 0 [30] */ + unsigned int valid : 1; /* 0 [31] */ +#else + unsigned int buffered_class : 1; /* 0 [24] */ + unsigned int camp_on : 1; /* 0 [25] */ + unsigned int dedicated_simplex : 1; /* 0 [26] */ + unsigned int sequential_delivery : 1; /* 0 [27] */ + unsigned int stacked_connect_request : 2; /* 0 [28-29] */ + unsigned int intermix_mode : 1; /* 0 [30] */ + unsigned int valid : 1; /* 0 [31] */ + unsigned int reserved_cl2 : 7; /* 0 [16-22] */ + unsigned int priority : 1; /* 0 [23] */ + unsigned int ic_data_compression_capable : 1; /* 0 [8] */ + unsigned int ic_ack_generation_assistance : 1; /* 0 [9] */ + unsigned int ic_ack_n_capable : 1; /* 0 [10] */ + unsigned int ic_ack_o_capable : 1; /* 0 [11] */ + /* 0 [12-13] */ + unsigned int ic_initial_responder_processes_accociator : 2; + unsigned int ic_x_id_reassignment : 2; /* 0 [14-15] */ + + unsigned int reserved_cl1 : 6; /* 0 [0-5] */ + /* 0 [6-7] */ + unsigned int ic_data_compression_history_buffer_size : 2; +#endif + +#if defined(UNF_CPU_ENDIAN) + unsigned int received_data_field_size : 16; /* 1 [0-15] */ + + unsigned int reserved_cl3 : 5; /* 1 [16-20] */ + /* 1 [21-22] */ + unsigned int rc_data_compression_history_buffer_size : 2; + unsigned int rc_data_compression_capable : 1; /* 1 [23] */ + + unsigned int rc_categories_per_sequence : 2; /* 1 [24-25] */ + unsigned int reserved_cl4 : 1; /* 1 [26] */ + unsigned int rc_error_policy_supported : 2; /* 1 [27-28] */ + unsigned int rc_x_id_interlock : 1; /* 1 [29] */ + unsigned int rc_ack_n_capable : 1; /* 1 [30] */ + unsigned int rc_ack_o_capable : 1; /* 1 [31] */ +#else + unsigned int rc_categories_per_sequence : 2; /* 1 [24-25] */ + unsigned int reserved_cl4 : 1; /* 1 [26] */ + unsigned int rc_error_policy_supported : 2; /* 1 [27-28] */ + unsigned int rc_x_id_interlock : 1; /* 1 [29] */ + unsigned int rc_ack_n_capable : 1; /* 1 [30] */ + unsigned int rc_ack_o_capable : 1; /* 1 [31] */ + + unsigned int reserved_cl3 : 5; /* 1 [16-20] */ + /* 1 [21-22] */ + unsigned int rc_data_compression_history_buffer_size : 2; + unsigned int rc_data_compression_capable : 1; /* 1 [23] */ + + unsigned int received_data_field_size : 16; /* 1 [0-15] */ +#endif + +#if defined(UNF_CPU_ENDIAN) + unsigned int n_port_end_to_end_credit : 15; /* 2 [0-14] */ + unsigned int reserved_cl5 : 1; /* 2 [15] */ + + unsigned int concurrent_sequences : 16; /* 2 [16-31] */ +#else + unsigned int concurrent_sequences : 16; /* 2 [16-31] */ + + unsigned int 
n_port_end_to_end_credit : 15; /* 2 [0-14] */ + unsigned int reserved_cl5 : 1; /* 2 [15] */ +#endif + +#if defined(UNF_CPU_ENDIAN) + unsigned int reserved_cl6 : 16; /* 3 [0-15] */ + unsigned int open_sequences_per_exchange : 16; /* 3 [16-31] */ +#else + unsigned int open_sequences_per_exchange : 16; /* 3 [16-31] */ + unsigned int reserved_cl6 : 16; /* 3 [0-15] */ +#endif +}; + +struct unf_fabric_parms_s { + struct unf_fabric_coparms_s co_parms; + unsigned int high_port_name; + unsigned int low_port_name; + unsigned int high_node_name; + unsigned int low_node_name; + struct unf_lgn_port_clparms_s cl_parms[3]; + unsigned int reserved_1[4]; + unsigned int vendor_version_level[4]; +}; + +struct unf_lgn_parms_s { + struct unf_lgn_port_coparms_s co_parms; + unsigned int high_port_name; + unsigned int low_port_name; + unsigned int high_node_name; + unsigned int low_node_name; + struct unf_lgn_port_clparms_s cl_parms[3]; + unsigned int reserved_1[4]; + unsigned int vendor_version_level[4]; +}; + +#define ELS_RJT 0x1 +#define ELS_ACC 0x2 +#define ELS_PLOGI 0x3 +#define ELS_FLOGI 0x4 +#define ELS_LOGO 0x5 +#define ELS_RLS 0xf +#define ELS_ECHO 0x10 +#define ELS_RRQ 0x12 +#define ELS_REC 0x13 +#define ELS_PRLI 0x20 +#define ELS_PRLO 0x21 +#define ELS_TPRLO 0x24 +#define ELS_PDISC 0x50 +#define ELS_FDISC 0x51 +#define ELS_ADISC 0x52 +#define ELS_RSCN 0x61 /* registered state change notification */ +#define ELS_SCR 0x62 /* state change registration */ + +#define NS_GIEL 0X0101 +#define NS_GA_NXT 0X0100 +#define NS_GPN_ID 0x0112 /* get port name by ID */ +#define NS_GNN_ID 0x0113 /* get node name by ID */ +#define NS_GFF_ID 0x011f /* get FC-4 features by ID */ +#define NS_GID_PN 0x0121 /* get ID for port name */ +#define NS_GID_NN 0x0131 /* get IDs for node name */ +#define NS_GID_FT 0x0171 /* get IDs by FC4 type */ +#define NS_GPN_FT 0x0172 /* get port names by FC4 type */ +#define NS_GID_PT 0x01a1 /* get IDs by port type */ +#define NS_RFT_ID 0x0217 /* reg FC4 type for ID */ +#define NS_RPN_ID 0x0212 /* reg port name for ID */ +#define NS_RNN_ID 0x0213 /* reg node name for ID */ +#define NS_RSNPN 0x0218 /* reg symbolic port name */ +#define NS_RFF_ID 0x021f /* reg FC4 Features for ID */ +#define NS_RSNN 0x0239 /* reg symbolic node name */ +#define ST_NULL 0xffff /* reg symbolic node name */ + +#define BLS_ABTS 0xA001 /* ABTS */ + +#define FCP_SRR 0x14 /* Sequence Retransmission Request */ + +#define UNF_FC_FID_DOM_MGR 0xfffc00 /* domain manager base */ +enum unf_fc_well_known_fabric_id { + UNF_FC_FID_NONE = 0x000000, /* No destination */ + UNF_FC_FID_DOM_CTRL = 0xfffc01, /* domain controller */ + UNF_FC_FID_BCAST = 0xffffff, /* broadcast */ + UNF_FC_FID_FLOGI = 0xfffffe, /* fabric login */ + UNF_FC_FID_FCTRL = 0xfffffd, /* fabric controller */ + UNF_FC_FID_DIR_SERV = 0xfffffc, /* directory server */ + UNF_FC_FID_TIME_SERV = 0xfffffb, /* time server */ + UNF_FC_FID_MGMT_SERV = 0xfffffa, /* management server */ + UNF_FC_FID_QOS = 0xfffff9, /* QoS Facilitator */ + UNF_FC_FID_ALIASES = 0xfffff8, /* alias server (FC-PH2) */ + UNF_FC_FID_SEC_KEY = 0xfffff7, /* Security key dist. 
server */ + UNF_FC_FID_CLOCK = 0xfffff6, /* clock synch server */ + UNF_FC_FID_MCAST_SERV = 0xfffff5 /* multicast server */ +}; + +#define DRV_ENTRY_PER_SGL 64 /* Size of an entry array in a hash table */ +#define DRV_DATA_PROTECTION_LEN 8 + +struct dif_result_info_s { + unsigned char actual_dif[DRV_DATA_PROTECTION_LEN]; + unsigned char expected_dif[DRV_DATA_PROTECTION_LEN]; +}; + +struct drv_sge { + char *buf; + void *page_ctrl; + unsigned int length; + unsigned int offset; +}; + +/* + * @enum drv_io_direction + * SCSI data direction + */ +enum drv_io_direction { + DRV_IO_BIDIRECTIONAL = 0, + DRV_IO_DIRECTION_WRITE = 1, + DRV_IO_DIRECTION_READ = 2, + DRV_IO_DIRECTION_NONE = 3, +}; + +/* + * Hash table data structure + */ +struct drv_sgl { + struct drv_sgl *next_sgl; /* poin to SGL,SGL list */ + unsigned short num_sges_in_chain; + unsigned short num_sges_in_sgl; + unsigned int flag; + unsigned long long serial_num; + struct drv_sge sge[DRV_ENTRY_PER_SGL]; + struct list_head node; + unsigned int cpu_id; +}; + +struct dif_info_s { + /* Indicates the result returned when the data + * protection information is inconsistent,add by pangea + */ + struct dif_result_info_s dif_result; + /* Data protection information operation code + * bit[31-24] other operation code + * bit[23-16] Data Protection Information Operation + * bit[15-8] Data protection information verification + * bit[7-0] Data protection information replace + */ + unsigned int protect_opcode; + unsigned short app_tag; + unsigned long long start_lba; /* IO start LBA */ + struct drv_sgl *protection_sgl; +}; + +typedef struct Scsi_Host unf_scsi_host_s; + +struct unf_ini_error_code_s { + unsigned int drv_err_code; /* driver error code */ + unsigned int ap_err_code; /* up level error code */ +}; + +typedef unsigned int (*ini_get_sgl_entry_buf)(void *v_upper_cmnd, + void *v_driver_sgl, + void **v_upper_sgl, + unsigned int *v_req_index, + unsigned int *v_index, + char **v_buf, + unsigned int *v_buf_len); + +struct unf_host_param_s { + int can_queue; + unsigned short sg_table_size; + short cmnd_per_lun; + unsigned int max_id; + unsigned int max_lun; + unsigned int max_channel; + unsigned short max_cmnd_len; + unsigned short max_sectors; + unsigned long long dma_boundary; + unsigned int port_id; + void *lport; + struct device *pdev; +}; + +#define UNF_DIF_AREA_SIZE 8 + +struct unf_dif_control_info_s { + unsigned short app_tag; + unsigned short flags; + unsigned int protect_opcode; + unsigned int fcp_dl; + unsigned int start_lba; + unsigned char actual_dif[UNF_DIF_AREA_SIZE]; + unsigned char expected_dif[UNF_DIF_AREA_SIZE]; + unsigned int dif_sge_count; + void *dif_sgl; +}; + +struct unf_scsi_cmd_s { + unsigned int scsi_host_id; + unsigned int scsi_id; /* cmd->dev->id */ + unsigned long long lun_id; + unsigned long long port_id; + unsigned int underflow; /* Underflow */ + unsigned int transfer_len; /* Transfer Length */ + unsigned int resid; /* Resid */ + unsigned int sense_buf_len; + int result; + unsigned int entry_count; /* IO Buffer counter */ + unsigned int abort; + unsigned int err_code_table_cout; /* error code size */ + unsigned long long cmnd_sn; + unsigned long time_out; /* EPL driver add timer */ + unsigned short cmnd_len; /* Cdb length */ + unsigned char data_direction; /* data direction */ + unsigned char *pcmnd; /* SCSI CDB */ + unsigned char *sense_buf; + void *drv_private; /* driver host pionter */ + void *driver_scribble; /* Xchg pionter */ + void *upper_cmnd; /* UpperCmnd pointer by driver */ + unsigned char *pc_lun_id; /* new 
lunid */ + unsigned int world_id; + struct unf_dif_control_info_s dif_control; /* DIF control */ + struct unf_ini_error_code_s *err_code_table; /* error code table */ + void *sgl; /* Sgl pointer */ + ini_get_sgl_entry_buf pfn_unf_ini_get_sgl_entry; + void (*pfn_done)(struct unf_scsi_cmd_s *); + struct dif_info_s dif_info; +}; + +/* + * R_CTL Basic Link Data defines + */ +#define FC_RCTL_BLS 0x80000000 +#define FC_RCTL_BLS_ACC (FC_RCTL_BLS | 0x04000000) +#define FC_RCTL_BLS_RJT (FC_RCTL_BLS | 0x05000000) + +/* + * BA_RJT reason code defines + */ +#define FCXLS_BA_OK 0x00000000 +#define FCXLS_BA_RJT_INVALID_COMMAND 0x00010000 +#define FCXLS_BA_RJT_LOGICAL_ERROR 0x00030000 + +/* + * BA_RJT code explanation + */ + +#define FCXLS_BA_RJT_INV_OXID_RXID 0x00000300 +#define FCXLS_LS_RJT_INVALID_OXID_RXID 0x00001700 + +/* + * Types (word) + */ +#define FC_TYPE_WORD_BLS 0x00000000 + +/* + * SFS structures + */ +struct unf_ba_rjt_s { + unsigned int reason_code; /* BLS reason code and Reason Explanation */ +}; + +#define FC_ABTS_ACC_SEQ_CNT 0x0000ffff +struct unf_ba_acc_s { + unsigned int seq_id; + unsigned int oxid_rxid; + unsigned int seq_cnt; +}; + +union unf_ba_pld_u { + struct unf_ba_rjt_s ba_rjt; + struct unf_ba_acc_s ba_acc; +}; + +struct unf_abts_rsps_s { + struct unf_fchead_s frame_hdr; + union unf_ba_pld_u ba_pld; +}; + +/* + * BLS RJT structure header and payload + */ +struct unf_bls_rjt_s { + struct unf_fchead_s frame_hdr; + /* BLS reason code and Reason Explanation */ + unsigned int reason_code; +}; + +/* + * ELS ACC + */ +struct unf_els_acc_s { + struct unf_fchead_s frame_hdr; + unsigned int cmnd; +}; + +/* + * ELS RJT + */ +struct unf_els_rjt_s { + struct unf_fchead_s frame_hdr; + unsigned int cmnd; + unsigned int reason_code; +}; + +/* + * FLOGI payload, + * FC-LS-2 Table 139 FLOGI, PLOGI, FDISC or LS_ACC Payload + */ +struct unf_flogi_payload_s { + unsigned int cmnd; + struct unf_fabric_parms_s fabric_parms; +}; + +/* + * Flogi and Flogi accept frames. They are the same structure + */ +struct unf_flogi_fdisc_acc_s { + struct unf_fchead_s frame_hdr; + struct unf_flogi_payload_s flogi_payload; +}; + +/* + * Fdisc and Fdisc accept frames. They are the same structure + */ +struct unf_fdisc_acc_s { + struct unf_fchead_s frame_hdr; + struct unf_flogi_payload_s fdisc_payload; +}; + +/* + * PLOGI payload + */ +struct unf_plogi_payload_s { + unsigned int cmnd; + struct unf_lgn_parms_s parms; +}; + +/* + * Plogi, Plogi accept, Pdisc and Pdisc accept frames. + * They are all the same structure. + */ +struct unf_plogi_pdisc_s { + struct unf_fchead_s frame_hdr; + struct unf_plogi_payload_s payload; +}; + +/* + * LOGO logout link service requests invalidation of service parameters and + * port name. 
+ * see FC-PH 4.3 Section 21.4.8 + */ + +/* FC-LS-2 Table 12 LOGO Payload */ +struct unf_logo_payload_s { + unsigned int cmnd; + unsigned int nport_id; + unsigned int high_port_name; + unsigned int low_port_name; +}; + +/* + * payload to hold LOGO command + */ +struct unf_logo_s { + struct unf_fchead_s frame_hdr; + struct unf_logo_payload_s payload; +}; + +/* + * payload for ECHO command, refer to FC-LS-2 4.2.4 + */ +struct unf_echo_payload_s { + unsigned int cmnd; +#define UNF_FC_ECHO_PAYLOAD_LENGTH 255 /* Length in words */ + unsigned int data[UNF_FC_ECHO_PAYLOAD_LENGTH]; +}; + +struct unf_echo_s { + struct unf_fchead_s frame_hdr; + struct unf_echo_payload_s *echo_pld; + dma_addr_t phy_echo_addr; +}; + +#define UNF_PRLI_SIRT_EXTRA_SIZE 12 +/* + * payload for PRLI and PRLO + */ +struct unf_pril_payload_s { + unsigned int cmnd; +#define UNF_FC_PRLI_PAYLOAD_LENGTH 7 /* Length in words */ + unsigned int parms[UNF_FC_PRLI_PAYLOAD_LENGTH]; +}; + +/* + * FCHS structure with payload + */ +struct unf_prli_prlo_s { + struct unf_fchead_s frame_hdr; + struct unf_pril_payload_s payload; +}; + +/* + * ADISC payload + */ + +/* FC-LS-2 Table 75 ADISC Request payload */ +struct unf_adisc_payload_s { + unsigned int cmnd; + unsigned int hard_address; + unsigned int high_port_name; + unsigned int low_port_name; + unsigned int high_node_name; + unsigned int low_node_name; + unsigned int nport_id; +}; + +/* + * FCHS structure with payload + */ +struct unf_adisc_s { + /* FCHS structure */ + struct unf_fchead_s frame_hdr; + /* Payload data containing ADISC info */ + struct unf_adisc_payload_s adisc_payl; +}; + +/* + * RLS payload + */ +struct unf_rls_payload_s { + unsigned int cmnd; + unsigned int nport_id; /* in litle endian format */ +}; + +/* + * RLS + */ +struct unf_rls_s { + struct unf_fchead_s frame_hdr; /* FCHS structure */ + /* payload data containing the RLS info */ + struct unf_rls_payload_s rls; +}; + +/* + * RLS accept payload + */ +struct unf_rls_acc_payload_s { + unsigned int cmnd; + unsigned int link_failure_count; + unsigned int loss_of_sync_count; + unsigned int loss_of_signal_count; + unsigned int primitive_seq_count; + unsigned int invalid_trans_word_count; + unsigned int invalid_crc_count; +}; + +/* + * RLS accept + */ +struct unf_rls_acc_s { + struct unf_fchead_s frame_hdr; /* FCHS structure */ + /* payload data containing the RLS ACC info */ + struct unf_rls_acc_payload_s rls; +}; + +/* + * FCHS structure with payload + */ +struct unf_rrq_s { + struct unf_fchead_s frame_hdr; + unsigned int cmnd; + unsigned int sid; + unsigned int oxid_rxid; +}; + +/* + * ABTA accept + */ +struct unf_abts_acc_s { + struct unf_fchead_s frame_hdr; + unsigned int seq_id; + unsigned int oxid_rxid; + unsigned int seq_cnt; +}; + +struct unf_scr_s { + struct unf_fchead_s frame_hdr; + unsigned int payload[2]; +}; + +struct unf_ctiu_prem_s { + unsigned int rev_inid; + unsigned int gstype_gssub_options; + unsigned int cmnd_rsp_size; + unsigned int frag_reason_exp_vend; +}; + +struct unf_rftid_s { + struct unf_fchead_s frame_hdr; + struct unf_ctiu_prem_s ctiu_pream; + unsigned int nport_id; + unsigned int fc_4_types[8]; +}; + +struct unf_rffid_s { + struct unf_fchead_s frame_hdr; + struct unf_ctiu_prem_s ctiu_pream; + unsigned int nport_id; + unsigned int fc_4_feature; +}; + +struct unf_rffid_rsp_s { + struct unf_fchead_s frame_hdr; + struct unf_ctiu_prem_s ctiu_pream; +}; + +struct unf_gffid_s { + struct unf_fchead_s frame_hdr; + struct unf_ctiu_prem_s ctiu_pream; + unsigned int nport_id; +}; + +struct 
unf_gffid_rsp_s { + struct unf_fchead_s frame_hdr; + struct unf_ctiu_prem_s ctiu_pream; + unsigned int fc_4_feature[32]; +}; + +struct unf_gnnid_s { + struct unf_fchead_s frame_hdr; + struct unf_ctiu_prem_s ctiu_pream; + unsigned int nport_id; +}; + +struct unf_gnnid_rsp_s { + struct unf_fchead_s frame_hdr; + struct unf_ctiu_prem_s ctiu_pream; + unsigned int node_name[2]; +}; + +struct unf_gpnid_s { + struct unf_fchead_s frame_hdr; + struct unf_ctiu_prem_s ctiu_pream; + unsigned int nport_id; +}; + +struct unf_gpnid_rsp_s { + struct unf_fchead_s frame_hdr; + struct unf_ctiu_prem_s ctiu_pream; + unsigned int port_name[2]; +}; + +struct unf_rft_rsp_s { + struct unf_fchead_s frame_hdr; + struct unf_ctiu_prem_s ctiu_pream; +}; + +struct unf_srr_payload_s { + unsigned int srr_op; + unsigned short rx_id; + unsigned short ox_id; + unsigned int rel_offset; + unsigned char reserved[3]; + unsigned char rctl_for_iu; +}; + +struct unf_srr_s { + struct unf_fchead_s frame_hdr; + struct unf_srr_payload_s pld; +}; + +struct unf_srr_acc_pld_s { + unsigned int srr_op; /* 02000000h */ +}; + +struct unf_srr_acc_s { + struct unf_fchead_s frame_hdr; + struct unf_srr_acc_pld_s pld; +}; + +struct unf_ls_rjt_pld_s { + unsigned int srr_op; /* 01000000h */ + unsigned char vandor; + unsigned char reason_exp; + unsigned char reason; + unsigned char reserved; +}; + +struct unf_ls_rjt_s { + struct unf_fchead_s frame_hdr; + struct unf_ls_rjt_pld_s pld; +}; + +struct unf_rec_pld_s { + unsigned int rec_cmnd; + unsigned int xchg_org_sid; /* bit0-bit23 */ + unsigned short rx_id; + unsigned short ox_id; +}; + +struct unf_rec_s { + struct unf_fchead_s frame_hdr; + struct unf_rec_pld_s rec_pld; +}; + +struct unf_rec_acc_pld_s { + unsigned int cmnd; + unsigned short rx_id; + unsigned short ox_id; + unsigned int org_addr_id; /* bit0-bit23 */ + unsigned int rsp_addr_id; /* bit0-bit23 */ +}; + +struct unf_rec_acc_s { + struct unf_fchead_s frame_hdr; + struct unf_rec_acc_pld_s payload; +}; + +struct unf_gid_s { + struct unf_ctiu_prem_s ctiu_pream; + unsigned int scope_type; +}; + +struct unf_gid_acc_s { + struct unf_fchead_s frame_hdr; + struct unf_ctiu_prem_s ctiu_pream; +}; + +#define UNF_LOOPMAP_COUNT 128 +struct unf_loop_init_s { + struct unf_fchead_s frame_hdr; + unsigned int cmnd; +#define UNF_FC_ALPA_BIT_MAP_SIZE 4 + unsigned int alpa_bit_map[UNF_FC_ALPA_BIT_MAP_SIZE]; +}; + +struct unf_loop_map_s { + struct unf_fchead_s frame_hdr; + unsigned int cmnd; + unsigned int loop_map[32]; +}; + +struct unf_ctiu_rjt_s { + struct unf_fchead_s frame_hdr; + struct unf_ctiu_prem_s ctiu_pream; +}; + +struct unf_gif_acc_pld_s { + struct unf_ctiu_prem_s ctiu_pream; + + unsigned int gid_port_id[UNF_GID_PORT_CNT]; +}; + +struct unf_gid_rsp_s { + struct unf_gif_acc_pld_s *gid_acc_pld; +}; + +struct unf_gid_req_rsp_s { + struct unf_fchead_s frame_hdr; + struct unf_gid_s gid_req; + struct unf_gid_rsp_s gid_rsp; +}; + +/* Added by fangtao FC-LS-2 Table 31 RSCN Payload */ +struct unf_rscn_port_id_page_s { + unsigned char port_id_port; + unsigned char port_id_area; + unsigned char port_id_domain; + + unsigned char addr_format : 2; + unsigned char event_qualifier : 4; + unsigned char reserved : 2; +}; + +struct unf_rscn_pld_s { + unsigned int cmnd; + struct unf_rscn_port_id_page_s port_id_page[UNF_RSCN_PAGE_SUM]; +}; + +struct unf_rscn_s { + struct unf_fchead_s frame_hdr; + struct unf_rscn_pld_s *rscn_pld; +}; + +union unf_sfs_u { + struct { + struct unf_fchead_s frame_head; + unsigned char data[0]; + } sfs_common; + struct unf_abts_rsps_s abts_rsps; 
+ struct unf_els_acc_s els_acc; + struct unf_els_rjt_s els_rjt; + struct unf_plogi_pdisc_s plogi; + struct unf_logo_s logo; + struct unf_echo_s echo; + struct unf_echo_s echo_acc; + struct unf_prli_prlo_s prli; + struct unf_prli_prlo_s prlo; + struct unf_rls_s rls; + struct unf_rls_acc_s rls_acc; + struct unf_plogi_pdisc_s pdisc; + struct unf_adisc_s adisc; + struct unf_rrq_s rrq; + struct unf_flogi_fdisc_acc_s flogi; + struct unf_fdisc_acc_s fdisc; + struct unf_scr_s scr; + struct unf_rec_s rec; + struct unf_rec_acc_s rec_acc; + struct unf_srr_s srr; + struct unf_srr_acc_s srr_acc; + struct unf_ls_rjt_s ls_rjt; + struct unf_rscn_s rscn; + struct unf_gid_req_rsp_s get_id; + struct unf_rftid_s rft_id; + struct unf_rft_rsp_s rft_id_rsp; + struct unf_rffid_s rff_id; + struct unf_rffid_rsp_s rff_id_rsp; + struct unf_gffid_s gff_id; + struct unf_gffid_rsp_s gff_id_rsp; + struct unf_gnnid_s gnn_id; + struct unf_gnnid_rsp_s gnn_id_rsp; + struct unf_gpnid_s gpn_id; + struct unf_gpnid_rsp_s gpn_id_rsp; + struct unf_plogi_pdisc_s plogi_acc; + struct unf_plogi_pdisc_s pdisc_acc; + struct unf_adisc_s adisc_acc; + struct unf_prli_prlo_s prli_acc; + struct unf_prli_prlo_s prlo_acc; + struct unf_flogi_fdisc_acc_s flogi_acc; + struct unf_fdisc_acc_s fdisc_acc; + struct unf_loop_init_s lpi; + struct unf_loop_map_s loopmap; + struct unf_ctiu_rjt_s ctiu_rjt; +}; + +struct unf_sfs_entry_s { + /* Virtual addr of SFS buffer */ + union unf_sfs_u *fc_sfs_entry_ptr; + /* Physical addr of SFS buffer */ + unsigned long long sfs_buff_phy_addr; + /* Length of bytes in SFS buffer */ + unsigned int sfs_buff_len; + unsigned int cur_offset; +}; + +struct unf_fcp_rsp_iu_entry_s { + struct unf_fcprsp_iu_s *fcp_rsp_iu; + dma_addr_t fcp_rsp_iu_phy_addr; +}; + +struct unf_rjt_info_s { + unsigned int els_cmnd_code; + unsigned int reason_code; + unsigned int reason_explanation; +}; + +int unf_alloc_scsi_host(unf_scsi_host_s **v_scsi_host, + struct unf_host_param_s *v_host_param); +void unf_free_scsi_host(unf_scsi_host_s *v_scsi_host); +unsigned int unf_register_ini_transport(void); +void unf_unregister_ini_transport(void); +void unf_report_io_dm_event(void *v_lport, unsigned int type, + unsigned int value); +void unf_save_sense_data(void *scsicmd, const char *sense, int senslen); + +#endif
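A note on the FCP_CMND layout defined above: struct unf_fcp_cmnd_s packs the command reference, task attribute, task management flags, data-direction bits and additional CDB length into the single 32-bit "control" word, and the UNF_*_MASK constants together with UNF_SET_TASK_MGMT_FLAGS()/UNF_GET_TASK_MGMT_FLAGS() operate on that layout. The sketch below is illustrative only and not part of the patch; the two helper functions and the chosen inputs (a LUN-reset TMF, write direction) are hypothetical, but they use only macros declared in unf_scsi_common.h above.

#include <linux/types.h>
#include <linux/printk.h>
#include "unf_scsi_common.h"

/* Compose a control word: TM flags live in bits 8-15, the data-direction
 * bits in bits 0-1 (UNF_FCP_WR_DATA / UNF_FCP_RD_DATA).
 */
static u32 unf_example_build_control(u32 tm_code, bool is_write)
{
	u32 control = 0;

	control |= UNF_SET_TASK_MGMT_FLAGS(tm_code);
	control |= is_write ? UNF_FCP_WR_DATA : UNF_FCP_RD_DATA;

	return control;
}

/* Decode the same word: UNF_GET_TASK_MGMT_FLAGS() masks with
 * UNF_TASK_MGMT_MASK and shifts the TM flags back down to bits 0-7.
 */
static void unf_example_decode_control(u32 control)
{
	u32 tm_flags = UNF_GET_TASK_MGMT_FLAGS(control);

	if (tm_flags & UNF_FCP_TM_LOGICAL_UNIT_RESET)
		pr_info("control 0x%x carries a LUN reset TMF\n", control);

	if (control & UNF_FCP_WR_DATA)
		pr_info("control 0x%x indicates write data\n", control);
}

For instance, unf_example_build_control(UNF_FCP_TM_LOGICAL_UNIT_RESET, true) yields 0x1001 (TM flags 0x10 in bits 8-15 plus the write bit), which unf_example_decode_control() reports accordingly.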
From: Chenguangli chenguangli2@huawei.com
driver inclusion category: feature bugzilla: NA
-----------------------------------------------------------------------
This module is used to process services related to the FC protocol, covering ELS/GS service handling, discovery, event processing and exchange management.
Signed-off-by: Chenguangli chenguangli2@huawei.com Reviewed-by: Zengweiliang zengweiliang.zengweiliang@huawei.com Acked-by: Hanjun Guo guohanjun@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/scsi/Kconfig | 8 +- drivers/scsi/Makefile | 3 +- drivers/scsi/huawei/Kconfig | 21 + drivers/scsi/huawei/Makefile | 5 + drivers/scsi/huawei/hifc/Kconfig | 11 + drivers/scsi/huawei/hifc/Makefile | 42 + drivers/scsi/huawei/hifc/hifc_service.c | 3076 +++++++ drivers/scsi/huawei/hifc/hifc_service.h | 248 + drivers/scsi/huawei/hifc/unf_disc.c | 1320 +++ drivers/scsi/huawei/hifc/unf_disc.h | 53 + drivers/scsi/huawei/hifc/unf_event.c | 557 ++ drivers/scsi/huawei/hifc/unf_event.h | 101 + drivers/scsi/huawei/hifc/unf_exchg.c | 3632 +++++++++ drivers/scsi/huawei/hifc/unf_exchg.h | 513 ++ drivers/scsi/huawei/hifc/unf_service.c | 9873 +++++++++++++++++++++++ drivers/scsi/huawei/hifc/unf_service.h | 83 + 16 files changed, 19542 insertions(+), 4 deletions(-) create mode 100644 drivers/scsi/huawei/Kconfig create mode 100644 drivers/scsi/huawei/Makefile create mode 100644 drivers/scsi/huawei/hifc/Kconfig create mode 100644 drivers/scsi/huawei/hifc/Makefile create mode 100644 drivers/scsi/huawei/hifc/hifc_service.c create mode 100644 drivers/scsi/huawei/hifc/hifc_service.h create mode 100644 drivers/scsi/huawei/hifc/unf_disc.c create mode 100644 drivers/scsi/huawei/hifc/unf_disc.h create mode 100644 drivers/scsi/huawei/hifc/unf_event.c create mode 100644 drivers/scsi/huawei/hifc/unf_event.h create mode 100644 drivers/scsi/huawei/hifc/unf_exchg.c create mode 100644 drivers/scsi/huawei/hifc/unf_exchg.h create mode 100644 drivers/scsi/huawei/hifc/unf_service.c create mode 100644 drivers/scsi/huawei/hifc/unf_service.h
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index ec2708f78120..00006841fefc 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -179,7 +179,7 @@ config CHR_DEV_SCH changers are listed as "Type: Medium Changer" in /proc/scsi/scsi. If you have such hardware and want to use it with linux, say Y here. Check file:Documentation/scsi/scsi-changer.txt for details. - + If you want to compile this as a module ( = code which can be inserted in and removed from the running kernel whenever you want), say M here and read file:Documentation/kbuild/modules.txt and @@ -475,7 +475,7 @@ config SCSI_DPT_I2O tristate "Adaptec I2O RAID support " depends on SCSI && PCI && VIRT_TO_BUS help - This driver supports all of Adaptec's I2O based RAID controllers as + This driver supports all of Adaptec's I2O based RAID controllers as well as the DPT SmartRaid V cards. This is an Adaptec maintained driver by Deanna Bonds. See file:Documentation/scsi/dpti.txt.
@@ -638,7 +638,7 @@ config SCSI_GDTH ---help--- Formerly called GDT SCSI Disk Array Controller Support.
- This is a driver for RAID/SCSI Disk Array Controllers (EISA/ISA/PCI) + This is a driver for RAID/SCSI Disk Array Controllers (EISA/ISA/PCI) manufactured by Intel Corporation/ICP vortex GmbH. It is documented in the kernel source in file:drivers/scsi/gdth.c and file:drivers/scsi/gdth.h. @@ -1113,6 +1113,8 @@ source "drivers/scsi/qla4xxx/Kconfig" source "drivers/scsi/qedi/Kconfig" source "drivers/scsi/qedf/Kconfig"
+source "drivers/scsi/huawei/Kconfig" + config SCSI_LPFC tristate "Emulex LightPulse Fibre Channel Support" depends on PCI && SCSI diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 6d71b2a9592b..2973693f6dcc 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -79,10 +79,11 @@ obj-$(CONFIG_SCSI_IPS) += ips.o obj-$(CONFIG_SCSI_GENERIC_NCR5380) += g_NCR5380.o obj-$(CONFIG_SCSI_QLOGIC_FAS) += qlogicfas408.o qlogicfas.o obj-$(CONFIG_PCMCIA_QLOGIC) += qlogicfas408.o -obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o +obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/ obj-$(CONFIG_SCSI_QLA_ISCSI) += libiscsi.o qla4xxx/ obj-$(CONFIG_SCSI_LPFC) += lpfc/ +obj-$(CONFIG_SCSI_HUAWEI_FC) += huawei/ obj-$(CONFIG_SCSI_BFA_FC) += bfa/ obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor/ obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o diff --git a/drivers/scsi/huawei/Kconfig b/drivers/scsi/huawei/Kconfig new file mode 100644 index 000000000000..a9fbdef9b4b3 --- /dev/null +++ b/drivers/scsi/huawei/Kconfig @@ -0,0 +1,21 @@ +# +# Huawei driver configuration +# + +config SCSI_HUAWEI_FC + tristate "Huawei devices" + depends on PCI && SCSI + depends on SCSI_FC_ATTRS + default m + ---help--- + If you have a Fibre Channel PCI card belonging to this class, say Y. + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Huawei cards. If you say Y, you will be asked + for your specific card in the following questions. + +if SCSI_HUAWEI_FC + +source "drivers/scsi/huawei/hifc/Kconfig" + +endif # SCSI_HUAWEI_FC diff --git a/drivers/scsi/huawei/Makefile b/drivers/scsi/huawei/Makefile new file mode 100644 index 000000000000..fa48694cc166 --- /dev/null +++ b/drivers/scsi/huawei/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Huawei device drivers. +# + +obj-$(CONFIG_SCSI_FC_HIFC) += hifc/ diff --git a/drivers/scsi/huawei/hifc/Kconfig b/drivers/scsi/huawei/hifc/Kconfig new file mode 100644 index 000000000000..79c7954a0735 --- /dev/null +++ b/drivers/scsi/huawei/hifc/Kconfig @@ -0,0 +1,11 @@ +# +# Huawei driver configuration +# +config SCSI_FC_HIFC + tristate "Huawei hifc Fibre Channel Support" + default m + depends on PCI && SCSI + depends on SCSI_FC_ATTRS + ---help--- + This driver supports Huawei Fibre Channel PCI and + PCIE host adapters. 
diff --git a/drivers/scsi/huawei/hifc/Makefile b/drivers/scsi/huawei/hifc/Makefile new file mode 100644 index 000000000000..0128086c75d9 --- /dev/null +++ b/drivers/scsi/huawei/hifc/Makefile @@ -0,0 +1,42 @@ +obj-$(CONFIG_SCSI_FC_HIFC) += hifc.o + +hifc-objs += hifc_utils.o +hifc-objs += hifc_hba.o +hifc-objs += hifc_portmng.o + +hifc-objs += hifc_module.o +hifc-objs += hifc_chipitf.o +hifc-objs += hifc_io.o +hifc-objs += hifc_queue.o +hifc-objs += hifc_service.o +hifc-objs += hifc_wqe.o +hifc-objs += hifc_cfg.o +hifc-objs += hifc_lld.o + +hifc-objs += unf_io.o +hifc-objs += unf_io_abnormal.o +hifc-objs += unf_scsi.o +hifc-objs += unf_init.o +hifc-objs += unf_event.o +hifc-objs += unf_exchg.o +hifc-objs += unf_lport.o +hifc-objs += unf_disc.o +hifc-objs += unf_rport.o +hifc-objs += unf_service.o +hifc-objs += unf_portman.o +hifc-objs += unf_npiv.o +hifc-objs += hifc_sml.o +hifc-objs += hifc_tool.o +hifc-objs += hifc_tool_hw.o +hifc-objs += hifc_dbgtool_knl.o + +hifc-objs += hifc_hwif.o +hifc-objs += hifc_eqs.o +hifc-objs += hifc_api_cmd.o +hifc-objs += hifc_mgmt.o +hifc-objs += hifc_wq.o +hifc-objs += hifc_cmdq.o +hifc-objs += hifc_hwdev.o +hifc-objs += hifc_cqm_main.o +hifc-objs += hifc_cqm_object.o + diff --git a/drivers/scsi/huawei/hifc/hifc_service.c b/drivers/scsi/huawei/hifc/hifc_service.c new file mode 100644 index 000000000000..52c9ad7670ee --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_service.c @@ -0,0 +1,3076 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#include "unf_log.h" +#include "unf_common.h" +#include "hifc_module.h" +#include "hifc_service.h" +#include "hifc_io.h" +#include "hifc_chipitf.h" + +#define HIFC_RQ_ERROR_FRAME 0x100 +#define HIFC_ELS_SRQ_BUF_NUM 0x9 + +/* Parent SCQ Receive the ELS processing function */ +static unsigned int hifc_scq_rcv_els_cmd(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe); +static unsigned int hifc_scq_rcv_els_rsp(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe); +static unsigned int hifc_scq_rcv_els_rsp_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe); + +/* Parent SCQ Receive the GS RSP processing function */ +static unsigned int hifc_scq_rcv_gs_rsp(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe); + +/* Parent SCQ Receive the BLS RSP processing function */ +static unsigned int hifc_scq_rcv_abts_rsp(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe); + +/* Parent SCQ Receive the offload completion processing function */ +static unsigned int hifc_scq_rcv_offload_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe); + +/* Parent SCQ Receive the flush sq completion processing function */ +static unsigned int hifc_scq_rcv_flush_sq_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe); + +/* Parent SCQ Receive the bufferclear completion processing function */ +static unsigned int hifc_scq_rcv_buf_clear_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe); +static unsigned int hifc_scq_rcv_sess_rst_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe); +static unsigned int hifc_scq_rcv_clear_srq_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe); +static unsigned int hifc_scq_rcv_marker_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe); +static unsigned int hifc_scq_rcv_abts_marker_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe); + +typedef unsigned int (*pfn_scqe_handler)(struct hifc_hba_s *, + union hifcoe_scqe_u *); + +struct 
unf_scqe_handler_table_s { + unsigned int scqe_type; /* ELS type */ + int reclaim_sq_wpg; + pfn_scqe_handler pfn_scqe_handle_fun; +}; + +struct unf_scqe_handler_table_s scqe_handler_table[] = { + { /* INI rcvd ELS_CMND */ + HIFC_SCQE_ELS_CMND, + UNF_FALSE, + hifc_scq_rcv_els_cmd + }, + { /* INI rcvd ELS_RSP */ + HIFC_SCQE_ELS_RSP, + UNF_TRUE, + hifc_scq_rcv_els_rsp + }, + { /* INI rcvd GS_RSP */ + HIFC_SCQE_GS_RSP, + UNF_TRUE, + hifc_scq_rcv_gs_rsp + }, + { /* INI rcvd BLS_RSP */ + HIFC_SCQE_ABTS_RSP, + UNF_TRUE, + hifc_scq_rcv_abts_rsp + }, + { /* INI rcvd FCP RSP */ + HIFC_SCQE_FCP_IRSP, + UNF_TRUE, + hifc_scq_recv_iresp + }, + { /* INI rcvd ELS_RSP STS(Done) */ + HIFC_SCQE_ELS_RSP_STS, + UNF_TRUE, + hifc_scq_rcv_els_rsp_sts + }, + { /* INI rcvd Session enable STS */ + HIFC_SCQE_SESS_EN_STS, + UNF_FALSE, + hifc_scq_rcv_offload_sts + }, + { /* INI rcvd flush (pending) SQ STS */ + HIFC_SCQE_FLUSH_SQ_STS, + UNF_FALSE, + hifc_scq_rcv_flush_sq_sts + }, + { /* INI rcvd Buffer clear STS */ + HIFC_SCQE_BUF_CLEAR_STS, + UNF_FALSE, + hifc_scq_rcv_buf_clear_sts + }, + { /* INI rcvd session reset STS */ + HIFC_SCQE_SESS_RST_STS, + UNF_FALSE, + hifc_scq_rcv_sess_rst_sts + }, + { /* ELS SRQ */ + HIFC_SCQE_CLEAR_SRQ_STS, + UNF_FALSE, + hifc_scq_rcv_clear_srq_sts + }, + { /* INI rcvd TMF RSP */ + HIFC_SCQE_FCP_ITMF_RSP, + UNF_TRUE, + hifc_scq_recv_iresp + }, + { /* INI rcvd TMF Marker STS */ + HIFC_SCQE_ITMF_MARKER_STS, + UNF_FALSE, + hifc_scq_rcv_marker_sts + }, + { /* INI rcvd ABTS Marker STS */ + HIFC_SCQE_ABTS_MARKER_STS, + UNF_FALSE, + hifc_scq_rcv_abts_marker_sts + } +}; + +static unsigned int hifc_get_els_rps_pld_len(unsigned short type, + unsigned short cmnd, + unsigned int *v_els_acc_pld_len) +{ + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x4917, UNF_TRUE, v_els_acc_pld_len, + return UNF_RETURN_ERROR); + + /* RJT */ + if (type == ELS_RJT) { + *v_els_acc_pld_len = UNF_ELS_ACC_RJT_LEN; + return RETURN_OK; + } + + /* ACC */ + switch (cmnd) { + /* uses the same PAYLOAD length as PLOGI. 
*/ + case ELS_FLOGI: + case ELS_PDISC: + case ELS_PLOGI: + *v_els_acc_pld_len = UNF_PLOGI_ACC_PAYLOAD_LEN; + break; + + case ELS_PRLI: + /* The PRLI ACC payload extends 12 bytes */ + *v_els_acc_pld_len = UNF_PRLI_ACC_PAYLOAD_LEN - + UNF_PRLI_SIRT_EXTRA_SIZE; + break; + + case ELS_LOGO: + *v_els_acc_pld_len = UNF_LOGO_ACC_PAYLOAD_LEN; + break; + + case ELS_PRLO: + *v_els_acc_pld_len = UNF_PRLO_ACC_PAYLOAD_LEN; + break; + + case ELS_RSCN: + *v_els_acc_pld_len = UNF_RSCN_ACC_PAYLOAD_LEN; + break; + + case ELS_ADISC: + *v_els_acc_pld_len = UNF_ADISC_ACC_PAYLOAD_LEN; + break; + + case ELS_RRQ: + *v_els_acc_pld_len = UNF_RRQ_ACC_PAYLOAD_LEN; + break; + + case ELS_SCR: + *v_els_acc_pld_len = UNF_SCR_RSP_PAYLOAD_LEN; + break; + + case ELS_ECHO: + *v_els_acc_pld_len = UNF_ECHO_ACC_PAYLOAD_LEN; + break; + case ELS_RLS: + *v_els_acc_pld_len = UNF_RLS_ACC_PAYLOAD_LEN; + break; + case ELS_REC: + *v_els_acc_pld_len = UNF_REC_ACC_PAYLOAD_LEN; + break; + default: + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Unknown ELS command(0x%x)", cmnd); + ret = UNF_RETURN_ERROR; + break; + } + + return ret; +} + +struct hifc_els_cmd_payload_table_s { + unsigned short cmnd; /* ELS type */ + unsigned int req_pld_len; + unsigned int rsp_pld_len; +}; + +struct hifc_els_cmd_payload_table_s els_pld_table_map[] = { + { ELS_FDISC, + UNF_FDISC_PAYLOAD_LEN, + UNF_FDISC_ACC_PAYLOAD_LEN + }, + { ELS_FLOGI, + UNF_FLOGI_PAYLOAD_LEN, + UNF_FLOGI_ACC_PAYLOAD_LEN + }, + { ELS_PLOGI, + UNF_PLOGI_PAYLOAD_LEN, + UNF_PLOGI_ACC_PAYLOAD_LEN + }, + { ELS_SCR, + UNF_SCR_PAYLOAD_LEN, + UNF_SCR_RSP_PAYLOAD_LEN + }, + { ELS_PDISC, + UNF_PDISC_PAYLOAD_LEN, + UNF_PDISC_ACC_PAYLOAD_LEN + }, + { ELS_LOGO, + UNF_LOGO_PAYLOAD_LEN, + UNF_LOGO_ACC_PAYLOAD_LEN + }, + { ELS_PRLO, + UNF_PRLO_PAYLOAD_LEN, + UNF_PRLO_ACC_PAYLOAD_LEN + }, + { ELS_ADISC, + UNF_ADISC_PAYLOAD_LEN, + UNF_ADISC_ACC_PAYLOAD_LEN + }, + { ELS_RRQ, + UNF_RRQ_PAYLOAD_LEN, + UNF_RRQ_ACC_PAYLOAD_LEN + }, + { ELS_RSCN, + 0, + UNF_RSCN_ACC_PAYLOAD_LEN + }, + { ELS_ECHO, + UNF_ECHO_PAYLOAD_LEN, + UNF_ECHO_ACC_PAYLOAD_LEN + }, + { ELS_RLS, + UNF_RLS_PAYLOAD_LEN, + UNF_RLS_ACC_PAYLOAD_LEN + }, + { ELS_REC, + UNF_REC_PAYLOAD_LEN, + UNF_REC_ACC_PAYLOAD_LEN + } +}; + +static unsigned int hifc_get_els_req_and_acc_pld_len(unsigned short cmnd, + unsigned int *req_pld_len, + unsigned int *rsp_pld_len) +{ + unsigned int ret = RETURN_OK; + unsigned int i; + + UNF_CHECK_VALID(0x4917, UNF_TRUE, req_pld_len, return UNF_RETURN_ERROR); + + for (i = 0; i < (sizeof(els_pld_table_map) / + sizeof(struct hifc_els_cmd_payload_table_s)); i++) { + if (els_pld_table_map[i].cmnd == cmnd) { + *req_pld_len = els_pld_table_map[i].req_pld_len; + *rsp_pld_len = els_pld_table_map[i].rsp_pld_len; + return ret; + } + } + + switch (cmnd) { + case ELS_PRLI: + /* If sirt is enabled, The PRLI ACC payload extends + * 12 bytes + */ + *req_pld_len = HIFC_GET_PRLI_PAYLOAD_LEN; + *rsp_pld_len = HIFC_GET_PRLI_PAYLOAD_LEN; + break; + + default: + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, + UNF_ERR, "[err]Unknown ELS_CMD(0x%x)", cmnd); + ret = UNF_RETURN_ERROR; + break; + } + + return ret; +} + +/* + * Function Name : hifc_get_els_frame_len + * Function Description: Get ELS Frame length + * Input Parameters : type, + * : cmnd + * Output Parameters : v_frame_len + * Return Type : unsigned int + */ +static unsigned int hifc_get_els_frame_len(unsigned short type, + unsigned short cmnd, + unsigned int *v_frame_len) +{ + unsigned int ret = RETURN_OK; + unsigned int hdr_len = sizeof(struct 
unf_fchead_s); + unsigned int req_len = 0; + unsigned int rsp_len = 0; + + UNF_CHECK_VALID(0x4917, UNF_TRUE, v_frame_len, return UNF_RETURN_ERROR); + + if (type == ELS_RJT) + rsp_len = UNF_ELS_ACC_RJT_LEN; + else + ret = hifc_get_els_req_and_acc_pld_len(cmnd, &req_len, + &rsp_len); + + if (ret == RETURN_OK) + *v_frame_len = hdr_len + ((type == ELS_ACC || type == ELS_RJT) ? + rsp_len : req_len); + + return ret; +} + +static void hifc_build_els_frame_header(unsigned short v_xid_base, + unsigned short v_cmnd_type, + unsigned short els_code, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned int fctl = 0; + unsigned int rctl = 0; + unsigned int type = 0; + struct unf_fchead_s *cm_fc_hdr_buf = NULL; + struct unf_fchead_s *pkg_fc_hdr_info = NULL; + + pkg_fc_hdr_info = &v_pkg->frame_head; + cm_fc_hdr_buf = HIFC_GET_CMND_FC_HEADER(v_pkg); + + if (v_cmnd_type == ELS_CMND) { + rctl = HIFC_FC_RCTL_ELS_REQ; + fctl = HIFC_FCTL_REQ; + + /* If the ELS_CMD frame is sent, Adjusting the oxid */ + cm_fc_hdr_buf->oxid_rxid = pkg_fc_hdr_info->oxid_rxid + + ((unsigned int)v_xid_base << 16); + } else { + rctl = HIFC_FC_RCTL_ELS_RSP; + fctl = HIFC_FCTL_RESP; + + /* If the ELS_RSP frame is sent, Adjusting the rxid */ + cm_fc_hdr_buf->oxid_rxid = pkg_fc_hdr_info->oxid_rxid + + v_xid_base; + } + + type = HIFC_FC_TYPE_ELS; + + /* Get SID, DID, OXID, RXID from CM layer */ + cm_fc_hdr_buf->rctl_did = pkg_fc_hdr_info->rctl_did; + cm_fc_hdr_buf->csctl_sid = pkg_fc_hdr_info->csctl_sid; + cm_fc_hdr_buf->parameter = 0; + + /* R_CTL, CS_CTL, TYPE, F_CTL, SEQ_ID, DF_CTL, SEQ_CNT, LL filled */ + UNF_SET_FC_HEADER_RCTL(cm_fc_hdr_buf, rctl); + UNF_SET_FC_HEADER_CS_CTL(cm_fc_hdr_buf, 0); + UNF_SET_FC_HEADER_TYPE(cm_fc_hdr_buf, type); + UNF_SET_FC_HEADER_FCTL(cm_fc_hdr_buf, fctl); + UNF_SET_FC_HEADER_SEQ_CNT(cm_fc_hdr_buf, 0); + UNF_SET_FC_HEADER_DF_CTL(cm_fc_hdr_buf, 0); + UNF_SET_FC_HEADER_SEQ_ID(cm_fc_hdr_buf, 0); + + UNF_PRINT_SFS(UNF_INFO, 0, cm_fc_hdr_buf, sizeof(struct unf_fchead_s)); +} + +void hifc_save_login_para_in_sq_info( + struct hifc_hba_s *v_hba, + struct unf_port_login_parms_s *v_login_co_parms) +{ + struct hifc_hba_s *hba = NULL; + unsigned int rport_index = v_login_co_parms->rport_index; + struct hifc_parent_sq_info_s *sq_info = NULL; + + hba = (struct hifc_hba_s *)v_hba; + + if (rport_index >= UNF_HIFC_MAXRPORT_NUM) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) save login parms,but uplevel alloc invalid rport index: 0x%x", + hba->port_cfg.port_id, rport_index); + + return; + } + + sq_info = + &hba->parent_queue_mgr->parent_queues[rport_index].parent_sq_info; + + sq_info->plogi_coparams.seq_cnt = v_login_co_parms->seq_cnt; + sq_info->plogi_coparams.ed_tov = v_login_co_parms->ed_tov; + sq_info->plogi_coparams.tx_mfs = (v_login_co_parms->tx_mfs < + HIFC_DEFAULT_TX_MAX_FREAM_SIZE) ? 
HIFC_DEFAULT_TX_MAX_FREAM_SIZE : + v_login_co_parms->tx_mfs; + + sq_info->plogi_coparams.ed_tov_timer_val = + v_login_co_parms->ed_tov_timer_val; +} + +static void hifc_save_default_plogi_param_in_ctx( + struct hifc_hba_s *v_hba, + struct hifcoe_parent_context_s *v_ctx, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned int tx_mfs = HIFC_DEFAULT_TX_MAX_FREAM_SIZE; + unsigned int did = 0; + + did = UNF_GET_DID(v_pkg); + + if (did == UNF_FC_FID_DIR_SERV) + tx_mfs = 2048; + + v_ctx->sw_section.tx_mfs = cpu_to_be16((unsigned short)(tx_mfs)); +} + +static void hifc_save_plogi_acc_param_in_ctx( + struct hifc_hba_s *v_hba, + struct hifcoe_parent_context_s *v_ctx, + struct unf_frame_pkg_s *v_pkg) +{ +#define HIFC_UCODE_MAX_PKT_SIZE_PER_DISPATCH ((8 * 1024)) + + struct unf_lgn_port_coparms_s *port_co_param = NULL; + struct unf_plogi_payload_s *plogi_acc_pld = NULL; + + plogi_acc_pld = UNF_GET_PLOGI_ACC_PAYLOAD(v_pkg); + port_co_param = &plogi_acc_pld->parms.co_parms; + + /* e_d_tov and seq_cnt */ + hifc_big_to_cpu32(&v_ctx->sw_section.sw_ctxt_config.pctxt_val1, + sizeof(unsigned int)); + + v_ctx->sw_section.sw_ctxt_config.dw.e_d_tov = + port_co_param->e_d_tov_resolution; + + v_ctx->sw_section.sw_ctxt_config.dw.seq_cnt = + port_co_param->seq_cnt; + + hifc_cpu_to_big32(&v_ctx->sw_section.sw_ctxt_config.pctxt_val1, + sizeof(unsigned int)); + + v_ctx->sw_section.tx_mfs = + (unsigned short)(v_pkg->private[PKG_PRIVATE_RPORT_RX_SIZE]) < + HIFC_DEFAULT_TX_MAX_FREAM_SIZE ? + cpu_to_be16((unsigned short)HIFC_DEFAULT_TX_MAX_FREAM_SIZE) : + cpu_to_be16 ((unsigned short) + (v_pkg->private[PKG_PRIVATE_RPORT_RX_SIZE])); + + v_ctx->sw_section.e_d_tov_timer_val = + cpu_to_be32(port_co_param->e_d_tov); + + v_ctx->sw_section.mfs_unaligned_bytes = + cpu_to_be16(HIFC_UCODE_MAX_PKT_SIZE_PER_DISPATCH % + port_co_param->bb_receive_data_field_size); +} + +static void hifc_recover_offloading_state( + struct hifc_parent_queue_info_s *v_prntq_info, + enum hifc_parent_queue_state_e offload_state) +{ + unsigned long flag = 0; + + spin_lock_irqsave(&v_prntq_info->parent_queue_state_lock, flag); + + if (v_prntq_info->offload_state == HIFC_QUEUE_STATE_OFFLOADING) + v_prntq_info->offload_state = offload_state; + + spin_unlock_irqrestore(&v_prntq_info->parent_queue_state_lock, flag); +} + +static void hifc_save_magic_num_in_ctx(struct hifcoe_parent_context_s *v_ctx, + struct unf_frame_pkg_s *v_pkg) +{ + /* The CID itself is initialized by the microcode. + * The driver multiplexes the CID as magicnum and then updates + * the CID by the microcode. 
+ */ + v_ctx->sw_section.cid = cpu_to_be32(UNF_GETXCHGALLOCTIME(v_pkg)); +} + +static void hifc_save_magic_num_in_nurmal_root_ts( + struct hifc_root_sqe_s *v_rt_sqe, + struct unf_frame_pkg_s *v_pkg) +{ + v_rt_sqe->task_section.fc_dw1.magic_num = UNF_GETXCHGALLOCTIME(v_pkg); +} + +static int hifc_check_need_delay_offload( + void *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned int rport_idx, + struct hifc_parent_queue_info_s *v_cur_parent_queue, + struct hifc_parent_queue_info_s **v_offload_parnt_queue) +{ + unsigned long flag = 0; + struct hifc_parent_queue_info_s *offload_parnt_queue = NULL; + + spin_lock_irqsave(&v_cur_parent_queue->parent_queue_state_lock, flag); + + if (v_cur_parent_queue->offload_state == HIFC_QUEUE_STATE_OFFLOADING) { + spin_unlock_irqrestore( + &v_cur_parent_queue->parent_queue_state_lock, flag); + + offload_parnt_queue = hifc_find_offload_parent_queue( + v_hba, + v_pkg->frame_head.csctl_sid & UNF_NPORTID_MASK, + v_pkg->frame_head.rctl_did & UNF_NPORTID_MASK, + rport_idx); + if (offload_parnt_queue) { + *v_offload_parnt_queue = offload_parnt_queue; + + return UNF_TRUE; + } + } else { + spin_unlock_irqrestore( + &v_cur_parent_queue->parent_queue_state_lock, flag); + } + + return UNF_FALSE; +} + +static unsigned int hifc_build_service_wqe_root_offload( + void *v_hba, + struct unf_frame_pkg_s *v_pkg, + struct hifc_parent_queue_info_s *v_parnt_qinfo, + struct hifc_root_sqe_s *v_sqe) +{ + unsigned int cqm_xid = 0; + unsigned short els_cmnd_type = UNF_ZERO; + struct hifc_parent_ctx_s *parnt_ctx = NULL; + struct hifc_parent_sq_info_s *sq_info = NULL; + struct hifcoe_parent_context_s *v_ctx = NULL; + + els_cmnd_type = HIFC_GET_ELS_RSP_TYPE(v_pkg->cmnd); + cqm_xid = hifc_get_parent_ctx_xid_by_pkg(v_hba, v_pkg); + + /* An offload request is initiated only when the parent queue is in the + * initialized state + */ + if (v_parnt_qinfo->offload_state == HIFC_QUEUE_STATE_INITIALIZED) { + /* Obtain Parent Context and set WQE to off_load, GPA_Addr */ + parnt_ctx = hifc_get_parnt_ctx_virt_addr_by_pkg(v_hba, v_pkg); + + sq_info = hifc_find_parent_sq_by_pkg(v_hba, v_pkg); + if (unlikely((!parnt_ctx) || (!sq_info) || + (cqm_xid == INVALID_VALUE32))) { + return UNF_RETURN_ERROR; + } + + /* Fill in ROOT SQE with offload request */ + hifc_build_els_wqe_root_offload( + v_sqe, + parnt_ctx->cqm_parent_ctx_obj->paddr, + cqm_xid); + + /* If the value is PlogiAcc, parse the FlogiAcc negotiation + * parameter and fill in Context + */ + v_ctx = (struct hifcoe_parent_context_s *) + parnt_ctx->virt_parent_ctx; + + if (els_cmnd_type == ELS_ACC) + hifc_save_plogi_acc_param_in_ctx( + (struct hifc_hba_s *)v_hba, v_ctx, v_pkg); + else + hifc_save_default_plogi_param_in_ctx( + (struct hifc_hba_s *)v_hba, v_ctx, v_pkg); + + /* The SID DID parameter is updated to Parent SQ Qinfo */ + sq_info->local_port_id = UNF_GET_SID(v_pkg); + sq_info->remote_port_id = UNF_GET_DID(v_pkg); + + /* Transfers the key value to the ucode for offload */ + hifc_big_to_cpu32(v_ctx->key, sizeof(v_ctx->key)); + memcpy(v_ctx->key, &sq_info->local_port_id, + sizeof(sq_info->local_port_id)); + memcpy((unsigned char *)v_ctx->key + + sizeof(sq_info->local_port_id), + &sq_info->remote_port_id, + sizeof(sq_info->remote_port_id)); + + hifc_cpu_to_big32(v_ctx->key, sizeof(v_ctx->key)); + + /* Update magic num to parent_ctx */ + hifc_save_magic_num_in_ctx(v_ctx, v_pkg); + + hifc_build_service_wqe_ctx_sge( + v_sqe, parnt_ctx->parent_ctx, + sizeof(struct hifcoe_parent_context_s)); + + v_parnt_qinfo->offload_state = 
HIFC_QUEUE_STATE_OFFLOADING; + } else { + /* If the connection is being uninstalled and the plogi is + * delivered through the root channel, the plogi must be carried + * to the ucode. + */ + v_sqe->task_section.fc_dw4.parent_xid = cqm_xid; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) send PLOGI with no offload while parent queue is not initialized status", + ((struct hifc_hba_s *)v_hba)->port_cfg.port_id); + } + + return RETURN_OK; +} + +static unsigned int hifc_send_els_via_root(void *v_hba, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned short els_cmd_code = UNF_ZERO; + unsigned short els_cmnd_type = UNF_ZERO; + unsigned int frame_len = 0; + unsigned int exch_id = 0; + unsigned int scq_num = 0; + unsigned int rport_idx = 0; + int sqe_delay = UNF_FALSE; + void *frame_addr = NULL; + struct hifc_hba_s *hba = NULL; + struct hifc_parent_queue_info_s *prnt_qinfo = NULL; + struct hifc_parent_queue_info_s *offload_parnt_queue = NULL; + struct hifc_root_sqe_s *sqe = NULL; + struct hifc_root_sqe_s local_rt_sqe; + unsigned long flag = 0; + enum hifc_parent_queue_state_e last_offload_state = + HIFC_QUEUE_STATE_INITIALIZED; + struct hifc_destroy_ctrl_info_s destroy_sqe_info = { 0 }; + unsigned long long frame_phy_addr; + + /* The ROOT SQE is assembled in local variables and then copied to the + * queue memory + */ + sqe = &local_rt_sqe; + hba = (struct hifc_hba_s *)v_hba; + + memset(sqe, 0, sizeof(local_rt_sqe)); + + /* Determine the ELS type in the pstPkg */ + els_cmnd_type = HIFC_GET_ELS_RSP_TYPE(v_pkg->cmnd); + if (HIFC_PKG_IS_ELS_RSP(els_cmnd_type)) { + els_cmd_code = HIFC_GET_ELS_RSP_CODE(v_pkg->cmnd); + exch_id = UNF_GET_RXID(v_pkg); + sqe->task_section.fc_dw0.task_type = HIFC_SQE_ELS_RSP; + } else { + els_cmd_code = els_cmnd_type; + els_cmnd_type = ELS_CMND; + exch_id = UNF_GET_OXID(v_pkg); + sqe->task_section.fc_dw0.task_type = HIFC_SQE_ELS_CMND; + } + if ((els_cmd_code == ELS_ECHO) && (els_cmnd_type != ELS_RJT)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_WARN, + "[info]Port(0x%x) RPort(0x%x) send ELS ECHO can't send via root Type(0x%x)", + hba->port_cfg.port_id, rport_idx, els_cmnd_type); + + return UNF_RETURN_NOT_SUPPORT; + } + exch_id += hba->exit_base; + + ret = hifc_get_els_frame_len(els_cmnd_type, els_cmd_code, &frame_len); + if (ret != RETURN_OK) { + dump_stack(); + return ret; + } + + /* Obtains the frame start address */ + frame_addr = HIFC_GET_CMND_HEADER_ADDR(v_pkg); + frame_phy_addr = v_pkg->unf_cmnd_pload_bl.buf_dma_addr; + + /* Assemble the frame header and adjust the Paylaod based on the ELS */ + hifc_build_els_frame_header(hba->exit_base, els_cmnd_type, + els_cmd_code, v_pkg); + + /* Assembling the Control Section */ + hifc_build_service_wqe_ctrl_section( + &sqe->ctrl_section, + HIFC_BYTES_TO_QW_NUM( + sizeof(struct hifc_root_sqe_task_section_s)), + HIFC_BYTES_TO_QW_NUM(sizeof(struct hifc_root_sge_s))); + + /* Fill in Normal Root SQE TS */ + rport_idx = v_pkg->private[PKG_PRIVATE_XCHG_RPORT_INDEX]; + scq_num = hifc_get_rport_maped_cmd_scqn(v_hba, rport_idx); + hifc_build_service_wqe_root_ts(v_hba, sqe, exch_id, rport_idx, scq_num); + + /* Upsate magic number into sqe */ + hifc_save_magic_num_in_nurmal_root_ts(sqe, v_pkg); + + /* Fill in the special part of Normal Root SQE TS and initiate implicit + * uninstallation + */ + if ((els_cmd_code == ELS_PLOGI) && (els_cmnd_type != ELS_RJT)) { + prnt_qinfo = hifc_find_parent_queue_info_by_pkg(hba, v_pkg); + if (!prnt_qinfo) { + 
HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[warn]Port(0x%x) RPort(0x%x) send ELS Type(0x%x) find parent queue fail", + hba->port_cfg.port_id, rport_idx, + els_cmnd_type); + return UNF_RETURN_ERROR; + } + + spin_lock_irqsave(&prnt_qinfo->parent_queue_state_lock, flag); + + last_offload_state = prnt_qinfo->offload_state; + + /* Fill in the special part of Normal Root SQE TS */ + ret = hifc_build_service_wqe_root_offload((void *)hba, + v_pkg, prnt_qinfo, + sqe); + if (ret != RETURN_OK) { + spin_unlock_irqrestore( + &prnt_qinfo->parent_queue_state_lock, flag); + + return ret; + } + + spin_unlock_irqrestore(&prnt_qinfo->parent_queue_state_lock, + flag); + + /* Before the offload, check whether there is a risk of + * repeated offload + */ + sqe_delay = hifc_check_need_delay_offload((void *)hba, + v_pkg, rport_idx, + prnt_qinfo, + &offload_parnt_queue); + } + + /* Fill in Normal Root SQE SGE */ + hifc_build_service_wqe_root_sge(sqe, frame_addr, frame_phy_addr, + frame_len, v_hba); + + if (sqe_delay == UNF_TRUE) { + ret = hifc_push_delay_sqe((void *)hba, offload_parnt_queue, + sqe, v_pkg); + if (ret == RETURN_OK) { + hifc_recover_offloading_state(prnt_qinfo, + last_offload_state); + + return ret; + } + } + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Port(0x%x) RPort(0x%x) send ELS Type(0x%x) Code(0x%x) ExchId(0x%x)", + hba->port_cfg.port_id, rport_idx, els_cmnd_type, + els_cmd_code, exch_id); + + ret = hifc_root_sq_enqueue(hba, sqe); + if ((ret != RETURN_OK) && (prnt_qinfo)) { + hifc_recover_offloading_state(prnt_qinfo, last_offload_state); + + spin_lock_irqsave(&prnt_qinfo->parent_queue_state_lock, flag); + + if (prnt_qinfo->parent_sq_info.destroy_sqe.valid == + UNF_TRUE) { + memcpy(&destroy_sqe_info, + &prnt_qinfo->parent_sq_info.destroy_sqe, + sizeof(struct hifc_destroy_ctrl_info_s)); + + prnt_qinfo->parent_sq_info.destroy_sqe.valid = + UNF_FALSE; + } + + spin_unlock_irqrestore(&prnt_qinfo->parent_queue_state_lock, + flag); + + hifc_pop_destroy_parent_queue_sqe((void *)v_hba, + &destroy_sqe_info); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[warn]Port(0x%x) RPort(0x%x) send ELS Type(0x%x) Code(0x%x) ExchId(0x%x) fail, recover offloadstatus(%u)", + hba->port_cfg.port_id, + rport_idx, + els_cmnd_type, + els_cmd_code, + exch_id, + prnt_qinfo->offload_state); + } + + return ret; +} + +static void *hifc_get_els_frame_addr(struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned short els_cmd_code, + unsigned short els_cmnd_type, + unsigned long long *v_phyaddr) +{ + void *frame_pld_addr; + dma_addr_t els_frame_addr = 0; + + if (els_cmd_code == ELS_ECHO) { + frame_pld_addr = (void *)UNF_GET_ECHO_PAYLOAD(v_pkg); + els_frame_addr = UNF_GET_ECHO_PAYLOAD_PHYADDR(v_pkg); + } else if (els_cmd_code == ELS_RSCN) { + if (els_cmnd_type == ELS_CMND) { + /* Not Support */ + frame_pld_addr = NULL; + els_frame_addr = 0; + } else { + frame_pld_addr = + (void *)UNF_GET_RSCN_ACC_PAYLOAD(v_pkg); + els_frame_addr = v_pkg->unf_cmnd_pload_bl.buf_dma_addr + + sizeof(struct unf_fchead_s); + } + } else { + frame_pld_addr = (void *)HIFC_GET_CMND_PAYLOAD_ADDR(v_pkg); + els_frame_addr = v_pkg->unf_cmnd_pload_bl.buf_dma_addr + + sizeof(struct unf_fchead_s); + } + *v_phyaddr = els_frame_addr; + return frame_pld_addr; +} + +static unsigned int hifc_send_els_via_parent( + void *v_hba, + struct unf_frame_pkg_s *v_pkg, + struct hifc_parent_queue_info_s *v_prntq_info) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned short els_cmd_code = 
UNF_ZERO; + unsigned short els_cmnd_type = UNF_ZERO; + unsigned short remote_xid = 0; + unsigned short local_xid = 0; + struct hifc_hba_s *hba; + struct hifc_parent_sq_info_s *sq_info = NULL; + struct hifcoe_sqe_s sqe; + void *frame_pld_addr; + unsigned int frame_pld_len = 0; + unsigned int acc_pld_len = 0; + unsigned long long fram_phy_addr = 0; + + hba = (struct hifc_hba_s *)v_hba; + + memset(&sqe, 0, sizeof(struct hifcoe_sqe_s)); + + sq_info = &v_prntq_info->parent_sq_info; + + /* Determine the ELS type in pstPkg */ + els_cmnd_type = HIFC_GET_ELS_CMND_CODE(v_pkg->cmnd); + if (HIFC_PKG_IS_ELS_RSP(els_cmnd_type)) { + els_cmd_code = HIFC_GET_ELS_RSP_CODE(v_pkg->cmnd); + remote_xid = UNF_GET_OXID(v_pkg); + local_xid = UNF_GET_RXID(v_pkg) + hba->exit_base; + } else { + els_cmd_code = els_cmnd_type; + els_cmnd_type = ELS_CMND; + local_xid = UNF_GET_OXID(v_pkg) + hba->exit_base; + remote_xid = UNF_GET_RXID(v_pkg); + } + + frame_pld_addr = hifc_get_els_frame_addr(v_hba, v_pkg, els_cmd_code, + els_cmnd_type, &fram_phy_addr); + + if (HIFC_PKG_IS_ELS_RSP(els_cmnd_type)) { + ret = hifc_get_els_rps_pld_len(els_cmnd_type, els_cmd_code, + &frame_pld_len); + if (ret != RETURN_OK) + return ret; + + hifc_build_els_wqe_ts_rsp( + &sqe, sq_info, frame_pld_addr, + els_cmnd_type, els_cmd_code, + v_prntq_info->parent_sts_scq_info.cqm_queue_id); + } else { + /* Fill in HIFCOE_TASK_T_ELS */ + ret = hifc_get_els_req_and_acc_pld_len(els_cmd_code, + &frame_pld_len, + &acc_pld_len); + if (ret != RETURN_OK) + return ret; + + hifc_build_els_wqe_ts_req( + &sqe, sq_info, els_cmd_code, + v_prntq_info->parent_sts_scq_info.cqm_queue_id, + frame_pld_addr); + } + + /* Assemble the magicnum field of the els */ + hifc_build_els_wqe_ts_magic_num(&sqe, els_cmnd_type, + UNF_GETXCHGALLOCTIME(v_pkg)); + + /* Assemble the SQE Control Section part */ + hifc_build_service_wqe_ctrl_section( + &sqe.ctrl_sl, + HIFC_BYTES_TO_QW_NUM(HIFC_SQE_TS_SIZE), + HIFC_BYTES_TO_QW_NUM(sizeof(struct hifcoe_variable_sge_s))); + + /* Assemble the SQE Task Section Els Common part */ + hifc_build_service_wqe_ts_common(&sqe.ts_sl, sq_info->rport_index, + local_xid, remote_xid, + HIFC_LSW(frame_pld_len)); + + /* Build SGE */ + hifc_build_els_gs_wqe_sge(&sqe, frame_pld_addr, fram_phy_addr, + frame_pld_len, sq_info->context_id, v_hba); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) RPort(0x%x) send ELS Type(0x%x) Code(0x%x) ExchId(0x%x)", + hba->port_cfg.port_id, sq_info->rport_index, els_cmnd_type, + els_cmd_code, local_xid); + + ret = hifc_parent_sq_enqueue(sq_info, &sqe); + + return ret; +} + +unsigned int hifc_send_els_cmnd(void *v_hba, struct unf_frame_pkg_s *v_pkg) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned long flag = 0; + struct hifc_hba_s *hba = NULL; + struct hifc_parent_queue_info_s *prnt_qinfo = NULL; + unsigned short els_cmd_code = UNF_ZERO; + unsigned short els_rsp_code = UNF_ZERO; + union unf_sfs_u *fc_entry = NULL; + struct unf_rrq_s *rrq_pld = NULL; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + + /* Check Parameters */ + UNF_CHECK_VALID(0x5014, UNF_TRUE, v_hba, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x5015, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x5016, UNF_TRUE, UNF_GET_SFS_ENTRY(v_pkg), + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x5017, UNF_TRUE, HIFC_GET_CMND_PAYLOAD_ADDR(v_pkg), + return UNF_RETURN_ERROR); + + HIFC_CHECK_PKG_ALLOCTIME(v_pkg); + + hba = (struct hifc_hba_s *)v_hba; + els_cmd_code = HIFC_GET_ELS_CMND_CODE(v_pkg->cmnd); + els_rsp_code 
= HIFC_GET_ELS_RSP_CODE(v_pkg->cmnd); + + /* If RRQ Req, Special processing */ + if (els_cmd_code == ELS_RRQ) { + fc_entry = UNF_GET_SFS_ENTRY(v_pkg); + rrq_pld = &fc_entry->rrq; + ox_id = (unsigned short)(rrq_pld->oxid_rxid >> 16); + rx_id = (unsigned short)(rrq_pld->oxid_rxid & 0xFFFF); + ox_id += hba->exit_base; + rrq_pld->oxid_rxid = ox_id << 16 | rx_id; + } + + prnt_qinfo = hifc_find_parent_queue_info_by_pkg(hba, v_pkg); + if (!prnt_qinfo) { + HIFC_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "Port(0x%x) send ELS SID(0x%x) DID(0x%x) get a null parent queue info, send via root", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + /* If the Rport cannot be found, Send Pkg by Root SQ */ + ret = hifc_send_els_via_root(v_hba, v_pkg); + return ret; + } + + spin_lock_irqsave(&prnt_qinfo->parent_queue_state_lock, flag); + + /* After offload, Send Pkg by Parent SQ */ + if (HIFC_RPORT_OFFLOADED(prnt_qinfo)) { + spin_unlock_irqrestore(&prnt_qinfo->parent_queue_state_lock, + flag); + + ret = hifc_send_els_via_parent(v_hba, v_pkg, prnt_qinfo); + } else { + /* Before offload, Send Pkg by Root SQ */ + spin_unlock_irqrestore(&prnt_qinfo->parent_queue_state_lock, + flag); + + ret = hifc_send_els_via_root(v_hba, v_pkg); + } + + return ret; +} + +unsigned int hifc_rq_rcv_els_rsp_sts( + struct hifc_hba_s *v_hba, + struct hifc_root_rq_complet_info_s *v_cs_info) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int rx_id = (~0); + struct unf_frame_pkg_s pkg = { 0 }; + + rx_id = (unsigned int)v_cs_info->exch_id - v_hba->exit_base; + pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME] = v_cs_info->magic_num; + + ret = hifc_rcv_els_rsp_sts(v_hba, &pkg, rx_id); + HIFC_IO_STAT(v_hba, HIFCOE_TASK_T_ELS_RSP_STS); + + return ret; +} + +static unsigned int hifc_recv_els_rsp_payload(struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned int ox_id, + unsigned char *v_els_pld_buf, + unsigned int pld_len) +{ + unsigned int ret = UNF_RETURN_ERROR; + + v_pkg->type = UNF_PKG_ELS_REQ_DONE; + v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = ox_id; + + /* Payload Buffer in ROOT SQ Buffer */ + v_pkg->unf_cmnd_pload_bl.buffer_ptr = v_els_pld_buf; + v_pkg->unf_cmnd_pload_bl.length = pld_len; + v_pkg->byte_orders |= HIFC_BIT_2; + + /* Mark as a non-last block */ + v_pkg->last_pkg_flag = UNF_PKG_NOT_LAST_RESPONSE; + + UNF_LOWLEVEL_RECEIVE_ELS_PKG(ret, v_hba->lport, v_pkg); + + return ret; +} + +static unsigned int hifc_rq_rcv_els_frame(struct hifc_hba_s *v_hba, + unsigned char *v_frame, + unsigned int frame_len, + unsigned short pkg_flag, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int ox_id = INVALID_VALUE32; + unsigned int pld_len = 0; + unsigned char *plg_buf = NULL; + unsigned long flags = 0; + + plg_buf = v_frame; + pld_len = frame_len; + + v_pkg->status = UNF_IO_SUCCESS; + + if (UNF_GET_FC_HEADER_RCTL(&v_pkg->frame_head) == + HIFC_FC_RCTL_ELS_RSP) { + ox_id = v_pkg->frame_head.oxid_rxid >> 16; + + if (!(HIFC_XID_IS_VALID(ox_id, (unsigned int)v_hba->exit_base, + (unsigned int)v_hba->exit_count))) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, "[err]Port(0x%x) ExchId(0x%x) isn't in 0x%x~0x%x", + v_hba->port_cfg.port_id, ox_id, + v_hba->exit_base, + v_hba->exit_base + v_hba->exit_count - 1); + + goto rq_recv_error_els_frame; + } + + ox_id -= v_hba->exit_base; + + ret = hifc_recv_els_rsp_payload(v_hba, v_pkg, ox_id, plg_buf, + pld_len); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, 
UNF_LOG_REG_ATT, + UNF_ERR, + "[err]Port(0x%x) receive ESL RSP payload error, OXID(0x%x) RXID(0x%x) PldLen(0x%x)", + v_hba->port_cfg.port_id, UNF_GET_OXID(v_pkg), + UNF_GET_RXID(v_pkg), pld_len); + + HIFC_ERR_IO_STAT(v_hba, HIFCOE_TASK_T_RCV_ELS_RSP); + } + + if (HIFC_CHECK_IF_LAST_PKG(pkg_flag)) { + ret = hifc_rcv_els_rsp(v_hba, v_pkg, ox_id); + + HIFC_IO_STAT(v_hba, HIFCOE_TASK_T_RCV_ELS_RSP); + } + } else if (UNF_GET_FC_HEADER_RCTL(&v_pkg->frame_head) == + HIFC_FC_RCTL_ELS_REQ) { + HIFC_IO_STAT(v_hba, HIFCOE_TASK_T_RCV_ELS_CMD); + + if (HIFC_CHECK_IF_FIRST_PKG(pkg_flag)) + v_pkg->xchg_contex = NULL; + + v_pkg->last_pkg_flag = (HIFC_CHECK_IF_LAST_PKG(pkg_flag)) ? + UNF_PKG_LAST_REQUEST : UNF_PKG_NOT_LAST_REQUEST; + + ret = hifc_rcv_els_cmnd(v_hba, v_pkg, plg_buf, pld_len, + HIFC_CHECK_IF_FIRST_PKG(pkg_flag)); + + spin_lock_irqsave(&v_hba->delay_info.srq_lock, flags); + if (v_hba->delay_info.srq_delay_flag) { + v_hba->delay_info.srq_delay_flag = 0; + + if (!cancel_delayed_work(&v_hba->delay_info.del_work)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) rcvd plogi from srq process delay timer maybe timeout", + v_hba->port_cfg.port_id); + } + spin_unlock_irqrestore(&v_hba->delay_info.srq_lock, + flags); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, + UNF_ERR, + "[info]Port(0x%x) received els from root rq and send delay plogi to CM", + v_hba->port_cfg.port_id); + + hifc_rcv_els_cmnd( + v_hba, &v_hba->delay_info.pkg, + v_hba->delay_info.pkg.unf_cmnd_pload_bl.buffer_ptr, + 0, UNF_FALSE); + } else { + spin_unlock_irqrestore(&v_hba->delay_info.srq_lock, + flags); + } + + } else { + goto rq_recv_error_els_frame; + } + + return ret; + +rq_recv_error_els_frame: + return HIFC_RQ_ERROR_FRAME; +} + +static unsigned int hifc_rq_rcv_bls_frame(struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned int ret = RETURN_OK; + unsigned int ox_id = INVALID_VALUE32; + + v_pkg->status = UNF_IO_SUCCESS; + + if ((UNF_GET_FC_HEADER_RCTL(&v_pkg->frame_head) == HIFC_RCTL_BLS_ACC) || + (UNF_GET_FC_HEADER_RCTL(&v_pkg->frame_head) == HIFC_RCTL_BLS_RJT)) { + /* INI Mode */ + ox_id = UNF_GET_FC_HEADER_OXID(&v_pkg->frame_head); + if ((ox_id < (unsigned int)v_hba->exit_base) || + (ox_id >= (unsigned int)(v_hba->exit_base + + v_hba->exit_count))) { + goto rq_recv_error_bls_frame; + } + ox_id -= v_hba->exit_base; + + ret = hifc_rcv_bls_rsp(v_hba, v_pkg, ox_id); + HIFC_IO_STAT(v_hba, HIFCOE_TASK_T_RCV_ABTS_RSP); + } else { + goto rq_recv_error_bls_frame; + } + + return ret; + +rq_recv_error_bls_frame: + return HIFC_RQ_ERROR_FRAME; +} + +static unsigned int hifc_rq_rcv_service_frame(struct hifc_hba_s *v_hba, + unsigned char *v_frame, + unsigned int frame_len, + unsigned short pkg_flag, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned char fc_frame_type = 0; + + fc_frame_type = UNF_GET_FC_HEADER_TYPE(&v_pkg->frame_head); + + if (fc_frame_type == HIFC_FC_TYPE_ELS) { + v_hba->delay_info.root_rq_rcvd_flag = 1; + ret = hifc_rq_rcv_els_frame(v_hba, v_frame, frame_len, + pkg_flag, v_pkg); + } else if (fc_frame_type == HIFC_FC_TYPE_BLS) { + ret = hifc_rq_rcv_bls_frame(v_hba, v_pkg); + } else { + ret = HIFC_RQ_ERROR_FRAME; + } + + if (ret == HIFC_RQ_ERROR_FRAME) { + /* Error statistics are collected when an invalid frame + * is received + */ + HIFC_IO_STAT(v_hba, HIFCOE_TASK_T_BUTT); + + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[info]Port(0x%x) Receive an unsupported frame, Rctl(0x%x), Type(0x%x), Fctl(0x%x), 
Sid_Did(0x%x_0x%x),OxId_RxId(0x%x_0x%x), FrameLen(0x%x), drop it", + v_hba->port_cfg.port_id, + UNF_GET_FC_HEADER_RCTL(&v_pkg->frame_head), + UNF_GET_FC_HEADER_TYPE(&v_pkg->frame_head), + UNF_GET_FC_HEADER_FCTL(&v_pkg->frame_head), + UNF_GET_FC_HEADER_SID(&v_pkg->frame_head), + UNF_GET_FC_HEADER_DID(&v_pkg->frame_head), + UNF_GET_FC_HEADER_OXID(&v_pkg->frame_head), + UNF_GET_FC_HEADER_RXID(&v_pkg->frame_head), + frame_len); + } + + return ret; +} + +unsigned int hifc_rcv_service_frame_from_rq(struct hifc_hba_s *v_hba, + struct hifc_root_rq_info_s + *v_rq_info, + struct hifc_root_rq_complet_info_s + *v_complet_info, + unsigned short v_rcv_buf_num) +{ + unsigned short remain_len = 0; + unsigned short rcv_len = 0; + unsigned short pkg_flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short pkt_len = 0; + void *root_rq_rcv_buf = NULL; + unsigned short ci = 0; + unsigned int loop = 0; + struct unf_frame_pkg_s pkg = { 0 }; + struct unf_fchead_s *els_frame = NULL; + unsigned char *pld_buf = NULL; + unsigned int pld_len = 0; + + ci = v_rq_info->ci; + pkt_len = v_complet_info->buf_length; + memset(&pkg, 0, sizeof(pkg)); + + for (loop = 0; loop < v_rcv_buf_num; loop++) { + /* Obtain rcv buffer */ + root_rq_rcv_buf = + (void *)((unsigned long long)v_rq_info->rq_rcv_buff + + HIFC_ROOT_RQ_RECV_BUFF_SIZE * ci); + + /* Calculate the frame data address and length */ + els_frame = (struct unf_fchead_s *)root_rq_rcv_buf; + rcv_len = HIFC_ROOT_RQ_RECV_BUFF_SIZE; + pkg_flag = 0; + + if (loop == (v_rcv_buf_num - 1)) { + pkg_flag |= HIFC_LAST_PKG_FLAG; + remain_len = pkt_len % HIFC_ROOT_RQ_RECV_BUFF_SIZE; + rcv_len = (remain_len > 0) ? (remain_len) : + HIFC_ROOT_RQ_RECV_BUFF_SIZE; + } + + /* Calculate the frame data address and length */ + if (loop == 0) { + pkg_flag |= HIFC_FIRST_PKG_FLAG; + + memcpy(&pkg.frame_head, els_frame, + sizeof(pkg.frame_head)); + hifc_big_to_cpu32(&pkg.frame_head, + sizeof(pkg.frame_head)); + pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME] = + v_complet_info->magic_num; + + pld_buf = (unsigned char *)(els_frame + 1); + pld_len = rcv_len - sizeof(pkg.frame_head); + } else { + pld_buf = (unsigned char *)els_frame; + pld_len = rcv_len; + } + + /* Processing the rqe sent by the FC ucode */ + ret = hifc_rq_rcv_service_frame(v_hba, pld_buf, pld_len, + pkg_flag, &pkg); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, + UNF_INFO, + "[err]Up layer Process RQE frame or status abnormal(0x%x)", + ret); + + return UNF_RETURN_ERROR; + } + + ci = ((ci + 1) < v_rq_info->q_depth) ? 
(ci + 1) : 0; + } + + return RETURN_OK; +} + +static unsigned int hifc_rcv_gs_rsp_payload(const struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned int ox_id, + unsigned char *v_els_pld_buf, + unsigned int pld_len) +{ + unsigned int ret = UNF_RETURN_ERROR; + + v_pkg->type = UNF_PKG_GS_REQ_DONE; + v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = ox_id; + + /* Convert to small endian */ + hifc_big_to_cpu32(v_els_pld_buf, pld_len); + + /* Payload Buffer in ROOT SQ Buffer */ + v_pkg->unf_cmnd_pload_bl.buffer_ptr = v_els_pld_buf; + v_pkg->unf_cmnd_pload_bl.length = pld_len; + + /* Mark as a non-last block */ + v_pkg->last_pkg_flag = UNF_PKG_NOT_LAST_RESPONSE; + + UNF_LOWLEVEL_RECEIVE_GS_PKG(ret, v_hba->lport, v_pkg); + + return ret; +} + +static unsigned int hifc_scq_rcv_abts_rsp(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe) +{ + /* Default path, which is sent from SCQ to the driver */ + unsigned char status = 0; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int ox_id = INVALID_VALUE32; + struct unf_frame_pkg_s pkg = { 0 }; + struct hifcoe_scqe_rcv_abts_rsp_s *abts_rsp = NULL; + + abts_rsp = &v_scqe->rcv_abts_rsp; + pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME] = abts_rsp->magic_num; + + ox_id = (unsigned int)(abts_rsp->wd0.ox_id); + + if (unlikely((ox_id < (unsigned int)v_hba->exit_base) || + (ox_id >= + (unsigned int)(v_hba->exit_base + v_hba->exit_count)))) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) has bad OX_ID(0x%x) for bls_rsp", + v_hba->port_cfg.port_id, ox_id); + + return UNF_RETURN_ERROR; + } + + ox_id -= v_hba->exit_base; + + if (unlikely(HIFC_SCQE_HAS_ERRCODE(v_scqe))) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) BLS response has error code(0x%x) tag(0x%x)", + v_hba->port_cfg.port_id, + HIFC_GET_SCQE_STATUS(v_scqe), + (unsigned int)(abts_rsp->wd0.ox_id)); + + status = UNF_IO_FAILED; + } else { + pkg.frame_head.rctl_did = abts_rsp->wd3.did; + pkg.frame_head.csctl_sid = abts_rsp->wd4.sid; + pkg.frame_head.oxid_rxid = (unsigned int)(abts_rsp->wd0.rx_id) + | ox_id << 16; + + /* BLS_ACC/BLS_RJT: IO_succeed */ + if (abts_rsp->wd2.fh_rctrl == HIFC_RCTL_BLS_ACC) { + status = UNF_IO_SUCCESS; + } else if (abts_rsp->wd2.fh_rctrl == HIFC_RCTL_BLS_RJT) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) ABTS RJT: %08x-%08x-%08x", + v_hba->port_cfg.port_id, + abts_rsp->payload[0], + abts_rsp->payload[1], abts_rsp->payload[2]); + + status = UNF_IO_SUCCESS; + } else { + /* 3. BA_RSP type is err: IO_failed */ + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) BLS response RCTL is error", + v_hba->port_cfg.port_id); + + HIFC_ERR_IO_STAT(v_hba, HIFC_SCQE_ABTS_RSP); + + status = UNF_IO_FAILED; + } + } + + /* Set PKG/exchange status & Process BLS_RSP */ + pkg.status = status; + ret = hifc_rcv_bls_rsp(v_hba, &pkg, ox_id); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) recv ABTS rsp OX_ID(0x%x) RX_ID(0x%x) SID(0x%x) DID(0x%x) %s", + v_hba->port_cfg.port_id, + ox_id, + abts_rsp->wd0.rx_id, + abts_rsp->wd4.sid, + abts_rsp->wd3.did, + (ret == RETURN_OK) ? 
"OK" : "ERROR"); + + return ret; +} + +unsigned int hifc_rq_rcv_srv_err(struct hifc_hba_s *v_hba, + struct hifc_root_rq_complet_info_s *v_cs_info) +{ + UNF_REFERNCE_VAR(v_hba); + UNF_REFERNCE_VAR(v_cs_info); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]hifc_rq_rcv_srv_err not implemented yet"); + + if (!v_hba) + return UNF_RETURN_ERROR; + + if (!v_cs_info) + return UNF_RETURN_ERROR; + + return UNF_RETURN_ERROR; +} + +unsigned int hifc_rcv_els_cmnd(const struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned char *v_pld, + unsigned int pld_len, + int first_frame) +{ + unsigned int ret = UNF_RETURN_ERROR; + + /* Convert Payload to small endian */ + hifc_big_to_cpu32(v_pld, pld_len); + + v_pkg->type = UNF_PKG_ELS_REQ; + + v_pkg->unf_cmnd_pload_bl.buffer_ptr = v_pld; + + /* Payload length */ + v_pkg->unf_cmnd_pload_bl.length = pld_len; + + /* Obtain the Cmnd type from the Paylaod. The Cmnd is in small endian */ + if (first_frame == UNF_TRUE) { + v_pkg->cmnd = UNF_GET_FC_PAYLOAD_ELS_CMND( + v_pkg->unf_cmnd_pload_bl.buffer_ptr); + } + + /* Errors have been processed in HIFC_RecvElsError */ + v_pkg->status = UNF_IO_SUCCESS; + + /* Send PKG to the CM layer */ + UNF_LOWLEVEL_RECEIVE_ELS_PKG(ret, v_hba->lport, v_pkg); + + return ret; +} + +unsigned int hifc_rcv_els_rsp(const struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned int ox_id) +{ + unsigned int ret = UNF_RETURN_ERROR; + + /* Receive CmndReqSts */ + v_pkg->type = UNF_PKG_ELS_REQ_DONE; + v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = ox_id; + v_pkg->byte_orders |= HIFC_BIT_2; + + /* Mark the last block */ + v_pkg->last_pkg_flag = UNF_PKG_LAST_RESPONSE; + + /* Send PKG to the CM layer */ + UNF_LOWLEVEL_RECEIVE_ELS_PKG(ret, v_hba->lport, v_pkg); + + return ret; +} + +unsigned int hifc_rcv_els_rsp_sts(const struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned int rx_id) +{ + unsigned int ret = UNF_RETURN_ERROR; + + v_pkg->type = UNF_PKG_ELS_REPLY_DONE; + v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = rx_id; + + UNF_LOWLEVEL_SEND_ELS_DONE(ret, v_hba->lport, v_pkg); + + return ret; +} + +unsigned int hifc_rcv_gs_rsp(const struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned int ox_id) +{ + unsigned int ret = UNF_RETURN_ERROR; + + /* Receive CmndReqSts */ + v_pkg->type = UNF_PKG_GS_REQ_DONE; + v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = ox_id; + + /* Mark the last block */ + v_pkg->last_pkg_flag = UNF_PKG_LAST_RESPONSE; + + /* Send PKG to the CM layer */ + UNF_LOWLEVEL_RECEIVE_GS_PKG(ret, v_hba->lport, v_pkg); + + return ret; +} + +unsigned int hifc_rcv_bls_rsp(const struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned int ox_id) +{ + /* + * 1. SCQ (normal) + * 2. 
from Root RQ (parent no existence) + ** + * single frame, single sequence + */ + unsigned int ret = UNF_RETURN_ERROR; + + v_pkg->type = UNF_PKG_BLS_REQ_DONE; + v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = ox_id; + v_pkg->last_pkg_flag = UNF_PKG_LAST_RESPONSE; + + UNF_LOWLEVEL_RECEIVE_BLS_PKG(ret, v_hba->lport, v_pkg); + + return ret; +} + +unsigned int hifc_rcv_tmf_marker_sts(const struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned int ox_id) +{ + unsigned int ret = UNF_RETURN_ERROR; + + v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = ox_id; + + /* Send PKG info to COM */ + UNF_LOWLEVEL_RECEIVE_MARKER_STS(ret, v_hba->lport, v_pkg); + + return ret; +} + +unsigned int hifc_rcv_abts_marker_sts(const struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned int ox_id) +{ + unsigned int ret = UNF_RETURN_ERROR; + + v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = ox_id; + + UNF_LOWLEVEL_RECEIVE_ABTS_MARKER_STS(ret, v_hba->lport, v_pkg); + + return ret; +} + +void hifc_scqe_error_pre_process(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe) +{ + /* Currently, only printing and statistics collection are performed */ + HIFC_ERR_IO_STAT(v_hba, HIFC_GET_SCQE_TYPE(v_scqe)); + HIFC_SCQ_ERR_TYPE_STAT(v_hba, HIFC_GET_SCQE_STATUS(v_scqe)); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_ABNORMAL, UNF_WARN, + "[warn]Port(0x%x)-Task_type(%u) SCQE contain error code(%u), additional info(0x%x)", + v_hba->port_cfg.port_id, + v_scqe->common.ch.wd0.task_type, + v_scqe->common.ch.wd0.err_code, + v_scqe->common.conn_id); +} + +unsigned int hifc_rcv_scqe_entry_from_scq(void *v_hba, void *v_scqe, + unsigned int scq_idx) +{ + unsigned int ret = UNF_RETURN_ERROR; + int do_reclaim = UNF_FALSE; + unsigned int index = 0; + unsigned int total_index = 0; + struct hifc_hba_s *hba = NULL; + union hifcoe_scqe_u *scqe = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_scqe, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, HIFC_TOTAL_SCQ_NUM > scq_idx, + return UNF_RETURN_ERROR); + + scqe = (union hifcoe_scqe_u *)v_scqe; + hba = (struct hifc_hba_s *)v_hba; + + HIFC_IO_STAT(hba, HIFC_GET_SCQE_TYPE(scqe)); + + /* 1. error code cheking */ + if (unlikely(HIFC_SCQE_HAS_ERRCODE(scqe))) { + /* So far, just print & counter */ + hifc_scqe_error_pre_process(hba, scqe); + } + + /* 2. Process SCQE by corresponding processer */ + total_index = sizeof(scqe_handler_table) / + sizeof(struct unf_scqe_handler_table_s); + while (index < total_index) { + if (HIFC_GET_SCQE_TYPE(scqe) == + scqe_handler_table[index].scqe_type) { + ret = scqe_handler_table[index].pfn_scqe_handle_fun( + hba, scqe); + do_reclaim = scqe_handler_table[index].reclaim_sq_wpg; + + break; + } + + index++; + } + + /* 3. SCQE type check */ + if (unlikely(index == total_index)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[warn]Unknown SCQE type %d", + HIFC_GET_SCQE_TYPE(scqe)); + + UNF_PRINT_SFS_LIMIT(UNF_ERR, hba->port_cfg.port_id, scqe, + sizeof(union hifcoe_scqe_u)); + } + + /* 4. 
If SCQE is for SQ-WQE then recovery Link List SQ free page */ + if (do_reclaim == UNF_TRUE) { + if (HIFC_SCQE_CONN_ID_VALID(scqe)) { + ret = hifc_reclaim_sq_wqe_page(v_hba, scqe); + } else { + /* NOTE: for buffer clear, the SCQE conn_id is 0xFFFF, + * count with HBA + */ + HIFC_HBA_STAT( + (struct hifc_hba_s *)v_hba, + HIFC_STAT_SQ_IO_BUFFER_CLEARED); + } + } + + return ret; +} + +static void *hifc_get_els_buf_by_userid(struct hifc_hba_s *v_hba, + unsigned short user_id) +{ + struct hifc_srq_buff_entry_s *buf_entry = NULL; + struct hifc_srq_info_s *srq_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return NULL); + + srq_info = &v_hba->els_srq_info; + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + user_id < srq_info->valid_wqe_num, return NULL); + + buf_entry = &srq_info->els_buff_entry_head[user_id]; + + return buf_entry->buff_addr; +} + +static unsigned int hifc_check_srq_buf_valid(struct hifc_hba_s *v_hba, + unsigned int *v_buf_id, + unsigned int v_buf_num) +{ + unsigned int index = 0; + unsigned int buf_id = 0; + void *srq_buf = NULL; + + for (index = 0; index < v_buf_num; index++) { + buf_id = v_buf_id[index]; + + if (buf_id < v_hba->els_srq_info.valid_wqe_num) { + srq_buf = hifc_get_els_buf_by_userid( + v_hba, + (unsigned short)buf_id); + } else { + srq_buf = NULL; + } + + if (!srq_buf) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) get srq buffer user id(0x%x) is null", + v_hba->port_cfg.port_id, buf_id); + + return UNF_RETURN_ERROR; + } + } + + return RETURN_OK; +} + +static void hifc_reclaim_srq_buff(struct hifc_hba_s *v_hba, + unsigned int *v_buf_id, + unsigned int v_buf_num) +{ + unsigned int index = 0; + unsigned int buf_id = 0; + void *srq_buf = NULL; + + for (index = 0; index < v_buf_num; index++) { + buf_id = v_buf_id[index]; + if (buf_id < v_hba->els_srq_info.valid_wqe_num) { + srq_buf = hifc_get_els_buf_by_userid( + v_hba, + (unsigned short)buf_id); + } else { + srq_buf = NULL; + } + + /* If the value of buffer is NULL, it indicates that the value + * of buffer is invalid. In this case, exit directly. 
+ */ + if (!srq_buf) + break; + + hifc_post_els_srq_wqe(&v_hba->els_srq_info, + (unsigned short)buf_id); + } +} + +static unsigned int hifc_check_els_gs_valid(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe, + struct unf_frame_pkg_s *v_pkg, + unsigned int *v_buf_id, + unsigned int buf_num, + unsigned int frame_len) +{ + unsigned int ox_id = INVALID_VALUE32; + + ox_id = v_pkg->frame_head.oxid_rxid >> 16; + + /* The ELS CMD returns an error code and discards it directly */ + if ((sizeof(struct hifc_fc_frame_header) > frame_len) || + (HIFC_SCQE_HAS_ERRCODE(v_scqe)) || + (buf_num > HIFC_ELS_SRQ_BUF_NUM)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[event]Port(0x%x) get scqe type(0x%x) payload len(0x%x),scq status(0x%x),user id num(0x%x) abnormal", + v_hba->port_cfg.port_id, + HIFC_GET_SCQE_TYPE(v_scqe), + frame_len, + HIFC_GET_SCQE_STATUS(v_scqe), + buf_num); + + /* ELS RSP Special Processing */ + if (HIFC_GET_SCQE_TYPE(v_scqe) == HIFC_SCQE_ELS_RSP) { + if (HIFC_SCQE_ERR_TO_CM(v_scqe)) { + v_pkg->status = UNF_IO_FAILED; + (void)hifc_rcv_els_rsp(v_hba, v_pkg, ox_id); + } else { + HIFC_HBA_STAT(v_hba, + HIFC_STAT_ELS_RSP_EXCH_REUSE); + } + } + + /* GS RSP Special Processing */ + if (HIFC_GET_SCQE_TYPE(v_scqe) == HIFC_SCQE_GS_RSP) { + if (HIFC_SCQE_ERR_TO_CM(v_scqe)) { + v_pkg->status = UNF_IO_FAILED; + (void)hifc_rcv_gs_rsp(v_hba, v_pkg, ox_id); + } else { + HIFC_HBA_STAT(v_hba, + HIFC_STAT_GS_RSP_EXCH_REUSE); + } + } + + /* Reclaim srq */ + if (buf_num <= HIFC_ELS_SRQ_BUF_NUM) + hifc_reclaim_srq_buff(v_hba, v_buf_id, buf_num); + + return UNF_RETURN_ERROR; + } + + /* ELS CMD Check the validity of the buffer sent by the ucode */ + if (HIFC_GET_SCQE_TYPE(v_scqe) == HIFC_SCQE_ELS_CMND) { + if (hifc_check_srq_buf_valid(v_hba, v_buf_id, buf_num) != + RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) get els cmnd scqe user id num(0x%x) abnormal, as some srq buff is null", + v_hba->port_cfg.port_id, buf_num); + + hifc_reclaim_srq_buff(v_hba, v_buf_id, buf_num); + + return UNF_RETURN_ERROR; + } + } + + return RETURN_OK; +} + +static unsigned int hifc_scq_rcv_els_cmd(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe) +{ + unsigned int ret = RETURN_OK; + unsigned int pld_len = 0; + unsigned int hdr_len = 0; + unsigned int frame_len = 0; + unsigned int rcv_data_len = 0; + unsigned int max_buf_num = 0; + unsigned short buf_id = 0; + unsigned int index = 0; + unsigned char *pld = NULL; + struct unf_frame_pkg_s pkg = { 0 }; + struct hifcoe_scqe_rcv_els_cmd_s *els_cmd = NULL; + struct hifc_fc_frame_header *els_frame = NULL; + struct hifc_fc_frame_header local_fc_frame = { 0 }; + void *els_buf = NULL; + int first_frame = UNF_FALSE; + unsigned long flags = 0; + unsigned char srq_delay_flag = 0; + + els_cmd = &v_scqe->rcv_els_cmd; + frame_len = els_cmd->wd3.data_len; + max_buf_num = els_cmd->wd3.user_id_num; + + pkg.xchg_contex = NULL; + pkg.status = UNF_IO_SUCCESS; + + /* Check the validity of error codes and buff. 
If an exception occurs, + * discard the error code + */ + ret = hifc_check_els_gs_valid(v_hba, v_scqe, &pkg, els_cmd->user_id, + max_buf_num, frame_len); + if (ret != RETURN_OK) + return RETURN_OK; + + /* Send data to COM cyclically */ + for (index = 0; index < max_buf_num; index++) { + /* Exception record, which is not processed currently */ + if (rcv_data_len >= frame_len) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) get els cmd date len(0x%x) is bigger than fream len(0x%x)", + v_hba->port_cfg.port_id, + rcv_data_len, frame_len); + } + + buf_id = (unsigned short)els_cmd->user_id[index]; + els_buf = hifc_get_els_buf_by_userid(v_hba, buf_id); + + /* Obtain playload address */ + pld = (unsigned char *)(els_buf); + hdr_len = 0; + first_frame = UNF_FALSE; + if (index == 0) { + els_frame = (struct hifc_fc_frame_header *)els_buf; + pld = (unsigned char *)(els_frame + 1); + + hdr_len = sizeof(struct hifc_fc_frame_header); + first_frame = UNF_TRUE; + + memcpy(&local_fc_frame, els_frame, + sizeof(struct hifc_fc_frame_header)); + hifc_big_to_cpu32(&local_fc_frame, + sizeof(struct hifc_fc_frame_header)); + memcpy(&pkg.frame_head, &local_fc_frame, + sizeof(pkg.frame_head)); + } + + /* Calculate the playload length */ + pkg.last_pkg_flag = 0; + pld_len = HIFC_SRQ_ELS_SGE_LEN; + + if ((rcv_data_len + HIFC_SRQ_ELS_SGE_LEN) >= frame_len) { + pkg.last_pkg_flag = 1; + pld_len = frame_len - rcv_data_len; + + if (unlikely( + (v_hba->active_topo == UNF_TOP_P2P_MASK) && + (v_hba->delay_info.root_rq_rcvd_flag == 0))) { + /* Only data is pushed for the first time, but + * the last packet flag is not set + */ + pkg.last_pkg_flag = 0; + srq_delay_flag = 1; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) revd els from srq, and need delay processed, topo(0x%x)", + v_hba->port_cfg.port_id, + v_hba->active_topo); + } + } + + /* Push data to COM */ + if (ret == RETURN_OK) { + ret = hifc_rcv_els_cmnd(v_hba, &pkg, pld, + (pld_len - hdr_len), + first_frame); + + /* If the plogi arrives before the flogi, the pkg is + * saved, and the last packet is pushed + * when the root rq contains content. 
+ */ + if (unlikely(srq_delay_flag == 1)) { + spin_lock_irqsave(&v_hba->delay_info.srq_lock, + flags); + memcpy(&v_hba->delay_info.pkg, &pkg, + sizeof(pkg)); + v_hba->delay_info.srq_delay_flag = 1; + v_hba->delay_info.pkg.last_pkg_flag = 1; + + /* Add a 20-ms timer to prevent the root rq + * from processing data + */ + (void)queue_delayed_work( + v_hba->work_queue, + &v_hba->delay_info.del_work, + (unsigned long) + msecs_to_jiffies((unsigned int) + HIFC_SRQ_PROCESS_DELAY_MS)); + + spin_unlock_irqrestore( + &v_hba->delay_info.srq_lock, flags); + } + } + + /* Reclaim srq buffer */ + hifc_post_els_srq_wqe(&v_hba->els_srq_info, buf_id); + + rcv_data_len += pld_len; + } + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) recv ELS Type(0x%x) Cmnd(0x%x) OXID(0x%x) RXID(0x%x) SID(0x%x) DID(0x%x) %u", + v_hba->port_cfg.port_id, + pkg.type, + pkg.cmnd, + els_cmd->wd2.ox_id, + els_cmd->wd2.rx_id, + els_cmd->wd1.sid, + els_cmd->wd0.did, + ret); + + return ret; +} + +static unsigned int hifc_get_els_gs_pld_len(struct hifc_hba_s *v_hba, + unsigned int v_rcv_data_len, + unsigned int v_frame_len) +{ + unsigned int pld_len; + + /* Exception record, which is not processed currently */ + if (v_rcv_data_len >= v_frame_len) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) get els rsp date len(0x%x) is bigger than fream len(0x%x)", + v_hba->port_cfg.port_id, + v_rcv_data_len, v_frame_len); + } + + pld_len = HIFC_SRQ_ELS_SGE_LEN; + if ((v_rcv_data_len + HIFC_SRQ_ELS_SGE_LEN) >= v_frame_len) + pld_len = v_frame_len - v_rcv_data_len; + + return pld_len; +} + +static unsigned int hifc_scq_rcv_els_rsp(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe) +{ + unsigned int ret = RETURN_OK; + unsigned int pld_len = 0; + unsigned int hdr_len = 0; + unsigned int frame_len = 0; + unsigned int rcv_data_len = 0; + unsigned int max_buf_num = 0; + unsigned short buf_id = 0; + unsigned int index = 0; + unsigned int ox_id = (~0); + struct unf_frame_pkg_s pkg = { 0 }; + struct hifcoe_scqe_rcv_els_gs_rsp_s *els_rsp; + struct hifc_fc_frame_header *els_frame = NULL; + void *els_buf = NULL; + unsigned char *pld = NULL; + + els_rsp = &v_scqe->rcv_els_gs_rsp; + frame_len = els_rsp->wd2.data_len; + max_buf_num = els_rsp->wd4.user_id_num; + + ox_id = (unsigned int)(els_rsp->wd1.ox_id) - v_hba->exit_base; + pkg.frame_head.oxid_rxid = (unsigned int)(els_rsp->wd1.rx_id) | + ox_id << 16; + pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME] = els_rsp->magic_num; + pkg.frame_head.csctl_sid = els_rsp->wd4.sid; + pkg.frame_head.rctl_did = els_rsp->wd3.did; + pkg.status = UNF_IO_SUCCESS; + + /* Handle the exception first. The ELS RSP returns the error code. + * Only the OXID can submit the error code to the CM layer. 
+ */ + ret = hifc_check_els_gs_valid(v_hba, v_scqe, &pkg, + els_rsp->user_id, max_buf_num, frame_len); + if (ret != RETURN_OK) + return RETURN_OK; + + /* if this is echo rsp */ + if (els_rsp->wd3.echo_rsp == UNF_TRUE) { + /* echo time stamp fill in the Els rsp user_id last 4dword */ + pkg.private[PKG_PRIVATE_ECHO_CMD_RCV_TIME] = + els_rsp->user_id[5]; + pkg.private[PKG_PRIVATE_ECHO_RSP_SND_TIME] = + els_rsp->user_id[6]; + pkg.private[PKG_PRIVATE_ECHO_CMD_SND_TIME] = + els_rsp->user_id[7]; + pkg.private[PKG_PRIVATE_ECHO_ACC_RCV_TIME] = + els_rsp->user_id[8]; + } + + /* Send data to COM cyclically */ + for (index = 0; index < max_buf_num; index++) { + /* Obtain buffer address */ + els_buf = NULL; + buf_id = (unsigned short)els_rsp->user_id[index]; + + els_buf = hifc_get_els_buf_by_userid(v_hba, buf_id); + + /* If the value of buffer is NULL, the buff id is abnormal and + * exits directly + */ + if (unlikely(!els_buf)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) OXID(0x%x) RXID(0x%x) SID(0x%x) DID(0x%x) Index(0x%x) get els rsp buff user id(0x%x) abnormal", + v_hba->port_cfg.port_id, ox_id, + els_rsp->wd1.rx_id, els_rsp->wd4.sid, + els_rsp->wd3.did, index, buf_id); + + if (index == 0) { + pkg.status = UNF_IO_FAILED; + ret = hifc_rcv_els_rsp(v_hba, &pkg, ox_id); + } + + return ret; + } + + hdr_len = 0; + pld = (unsigned char *)(els_buf); + if (index == 0) { + hdr_len = sizeof(struct hifc_fc_frame_header); + + els_frame = (struct hifc_fc_frame_header *)els_buf; + pld = (unsigned char *)(els_frame + 1); + } + + /* Calculate the playload length */ + pld_len = hifc_get_els_gs_pld_len(v_hba, rcv_data_len, + frame_len); + + /* Push data to COM */ + if (ret == RETURN_OK) { + ret = hifc_recv_els_rsp_payload(v_hba, &pkg, ox_id, pld, + (pld_len - hdr_len)); + } + + /* Reclaim srq buffer */ + hifc_post_els_srq_wqe(&v_hba->els_srq_info, buf_id); + + rcv_data_len += pld_len; + } + + if ((els_rsp->wd3.end_rsp) && (ret == RETURN_OK)) + ret = hifc_rcv_els_rsp(v_hba, &pkg, ox_id); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) receive ELS RSP OXID(0x%x) RXID(0x%x) SID(0x%x) DID(0x%x) end_rsp(0x%x) user_num(0x%x)", + v_hba->port_cfg.port_id, + ox_id, + els_rsp->wd1.rx_id, + els_rsp->wd4.sid, + els_rsp->wd3.did, + els_rsp->wd3.end_rsp, + els_rsp->wd4.user_id_num); + + return ret; +} + +static unsigned int hifc_scq_rcv_gs_rsp(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe) +{ + unsigned int ret = RETURN_OK; + unsigned int pld_len = 0; + unsigned int hdr_len = 0; + unsigned int frame_len = 0; + unsigned int rcv_data_len = 0; + unsigned int max_buf_num = 0; + unsigned short buf_id = 0; + unsigned int index = 0; + unsigned int ox_id = (~0); + struct unf_frame_pkg_s pkg = { 0 }; + struct hifcoe_scqe_rcv_els_gs_rsp_s *gs_rsp = NULL; + struct hifc_fc_frame_header *gs_frame = NULL; + void *gs_buf = NULL; + unsigned char *pld = NULL; + + gs_rsp = &v_scqe->rcv_els_gs_rsp; + frame_len = gs_rsp->wd2.data_len; + max_buf_num = gs_rsp->wd4.user_id_num; + + ox_id = (unsigned int)(gs_rsp->wd1.ox_id) - v_hba->exit_base; + pkg.frame_head.oxid_rxid = (unsigned int)(gs_rsp->wd1.rx_id) | + ox_id << 16; + pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME] = gs_rsp->magic_num; + pkg.frame_head.csctl_sid = gs_rsp->wd4.sid; + pkg.frame_head.rctl_did = gs_rsp->wd3.did; + pkg.status = UNF_IO_SUCCESS; + + if (gs_rsp->wd3.end_rsp) + HIFC_HBA_STAT(v_hba, HIFC_STAT_LAST_GS_SCQE); + + /* Exception handling: The GS RSP returns an error code. 
Only the OXID + * can submit the error code to the CM layer + */ + ret = hifc_check_els_gs_valid(v_hba, v_scqe, &pkg, gs_rsp->user_id, + max_buf_num, frame_len); + if (ret != RETURN_OK) + return RETURN_OK; + + /* Send data to COM cyclically */ + for (index = 0; index < max_buf_num; index++) { + /* Obtain buffer address */ + gs_buf = NULL; + buf_id = (unsigned short)gs_rsp->user_id[index]; + + gs_buf = hifc_get_els_buf_by_userid(v_hba, buf_id); + + if (unlikely(!gs_buf)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) OXID(0x%x) RXID(0x%x) SID(0x%x) DID(0x%x) Index(0x%x) get gs rsp scqe user id(0x%x) abnormal", + v_hba->port_cfg.port_id, ox_id, + gs_rsp->wd1.rx_id, gs_rsp->wd4.sid, + gs_rsp->wd3.did, index, buf_id); + + if (index == 0) { + pkg.status = UNF_IO_FAILED; + ret = hifc_rcv_gs_rsp(v_hba, &pkg, ox_id); + } + + return ret; + } + + /* Obtain playload address */ + hdr_len = 0; + pld = (unsigned char *)(gs_buf); + if (index == 0) { + hdr_len = sizeof(struct hifc_fc_frame_header); + + gs_frame = (struct hifc_fc_frame_header *)gs_buf; + pld = (unsigned char *)(gs_frame + 1); + } + + /* Calculate the playload length */ + pld_len = hifc_get_els_gs_pld_len(v_hba, rcv_data_len, + frame_len); + + /* Push data to COM */ + if (ret == RETURN_OK) + ret = hifc_rcv_gs_rsp_payload(v_hba, &pkg, ox_id, pld, + (pld_len - hdr_len)); + + /* Reclaim srq buffer */ + hifc_post_els_srq_wqe(&v_hba->els_srq_info, buf_id); + + rcv_data_len += pld_len; + } + + if ((gs_rsp->wd3.end_rsp) && (ret == RETURN_OK)) + ret = hifc_rcv_gs_rsp(v_hba, &pkg, ox_id); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) recv GS RSP OXID(0x%x) RXID(0x%x) SID(0x%x) DID(0x%x) end_rsp(0x%x) user_num(0x%x)", + v_hba->port_cfg.port_id, + ox_id, + gs_rsp->wd1.rx_id, + gs_rsp->wd4.sid, + gs_rsp->wd3.did, + gs_rsp->wd3.end_rsp, + gs_rsp->wd4.user_id_num); + + return ret; +} + +static unsigned int hifc_scq_rcv_els_rsp_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int rx_id = INVALID_VALUE32; + struct unf_frame_pkg_s pkg = { 0 }; + struct hifcoe_scqe_comm_rsp_sts_s *els_rsp_sts = NULL; + + els_rsp_sts = &v_scqe->comm_sts; + rx_id = (unsigned int)els_rsp_sts->wd0.rx_id; + rx_id = rx_id - v_hba->exit_base; + + pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME] = els_rsp_sts->magic_num; + pkg.frame_head.oxid_rxid = rx_id | + (unsigned int)(els_rsp_sts->wd0.ox_id) << 16; + + if (unlikely(HIFC_SCQE_HAS_ERRCODE(v_scqe))) + pkg.status = UNF_IO_FAILED; + else + pkg.status = UNF_IO_SUCCESS; + + ret = hifc_rcv_els_rsp_sts(v_hba, &pkg, rx_id); + + return ret; +} + +static unsigned int hifc_check_rport_is_valid( + const struct hifc_parent_queue_info_s *v_prntq_info, + unsigned int scqe_xid) +{ + if (v_prntq_info->parent_ctx.cqm_parent_ctx_obj) { + if ((v_prntq_info->parent_sq_info.context_id & + HIFC_CQM_XID_MASK) == (scqe_xid & HIFC_CQM_XID_MASK)) + return RETURN_OK; + } + + return UNF_RETURN_ERROR; +} + +static unsigned int hifc_scq_rcv_offload_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe) +{ + unsigned int rport_valid = UNF_RETURN_ERROR; + unsigned int rport_index = 0; + unsigned int cache_id = 0; + unsigned int local_ctx_id = 0; + unsigned long flag = 0; + struct hifc_parent_queue_info_s *prnt_qinfo = NULL; + struct hifcoe_scqe_sess_sts_s *offload_sts = NULL; + struct hifc_destroy_ctrl_info_s destroy_sqe_info = { 0 }; + + offload_sts = &v_scqe->sess_sts; + rport_index = offload_sts->wd1.conn_id; + 
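/* The offload status SCQE carries the connection id (rport index), + * cache id and context id; they are validated below before the + * parent queue is marked as offloaded. + */ +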
cache_id = offload_sts->wd2.cid; + local_ctx_id = offload_sts->wd0.xid_qpn; + + if (rport_index >= UNF_HIFC_MAXRPORT_NUM) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) receive an error offload status: rport index(0x%x) is invalid, cache id(0x%x)", + v_hba->port_cfg.port_id, rport_index, cache_id); + + return UNF_RETURN_ERROR; + } + + prnt_qinfo = &v_hba->parent_queue_mgr->parent_queues[rport_index]; + + rport_valid = hifc_check_rport_is_valid(prnt_qinfo, local_ctx_id); + if (rport_valid != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) receive an error offload status: rport index(0x%x), context id(0x%x) is invalid", + v_hba->port_cfg.port_id, rport_index, local_ctx_id); + + return UNF_RETURN_ERROR; + } + + /* off_load failed */ + if (HIFC_GET_SCQE_STATUS(v_scqe) != HIFC_COMPLETION_STATUS_SUCCESS) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x), rport(0x%x), context id(0x%x), cache id(0x%x), offload failed", + v_hba->port_cfg.port_id, rport_index, + local_ctx_id, cache_id); + + return UNF_RETURN_ERROR; + } + + spin_lock_irqsave(&prnt_qinfo->parent_queue_state_lock, flag); + prnt_qinfo->parent_sq_info.cache_id = cache_id; + prnt_qinfo->offload_state = HIFC_QUEUE_STATE_OFFLOADED; + atomic_set(&prnt_qinfo->parent_sq_info.sq_cashed, UNF_TRUE); + + if (prnt_qinfo->parent_sq_info.destroy_sqe.valid == UNF_TRUE) { + destroy_sqe_info.valid = + prnt_qinfo->parent_sq_info.destroy_sqe.valid; + + destroy_sqe_info.rport_index = + prnt_qinfo->parent_sq_info.destroy_sqe.rport_index; + + destroy_sqe_info.time_out = + prnt_qinfo->parent_sq_info.destroy_sqe.time_out; + + destroy_sqe_info.start_jiff = + prnt_qinfo->parent_sq_info.destroy_sqe.start_jiff; + + destroy_sqe_info.rport_info.nport_id = + prnt_qinfo->parent_sq_info.destroy_sqe.rport_info.nport_id; + destroy_sqe_info.rport_info.rport_index = + prnt_qinfo->parent_sq_info.destroy_sqe.rport_info.rport_index; + destroy_sqe_info.rport_info.port_name = + prnt_qinfo->parent_sq_info.destroy_sqe.rport_info.port_name; + + prnt_qinfo->parent_sq_info.destroy_sqe.valid = UNF_FALSE; + } + + spin_unlock_irqrestore(&prnt_qinfo->parent_queue_state_lock, flag); + + hifc_pop_destroy_parent_queue_sqe((void *)v_hba, &destroy_sqe_info); + + return RETURN_OK; +} + +unsigned int hifc_get_gs_req_and_rsp_pld_len(unsigned short cmd_code, + unsigned int *v_gs_pld_len, + unsigned int *v_gs_rsp_pld_len) +{ + UNF_CHECK_VALID(0x4917, UNF_TRUE, v_gs_pld_len, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x4917, UNF_TRUE, v_gs_rsp_pld_len, + return UNF_RETURN_ERROR); + + switch (cmd_code) { + case NS_GPN_ID: + *v_gs_pld_len = UNF_GPNID_PAYLOAD_LEN; + *v_gs_rsp_pld_len = UNF_GPNID_RSP_PAYLOAD_LEN; + break; + + case NS_GNN_ID: + *v_gs_pld_len = UNF_GNNID_PAYLOAD_LEN; + *v_gs_rsp_pld_len = UNF_GNNID_RSP_PAYLOAD_LEN; + break; + + case NS_GFF_ID: + *v_gs_pld_len = UNF_GFFID_PAYLOAD_LEN; + *v_gs_rsp_pld_len = UNF_GFFID_RSP_PAYLOAD_LEN; + break; + + case NS_GID_FT: + case NS_GID_PT: + *v_gs_pld_len = UNF_GID_PAYLOAD_LEN; + *v_gs_rsp_pld_len = UNF_GID_ACC_PAYLOAD_LEN; + break; + + case NS_RFT_ID: + *v_gs_pld_len = UNF_RFTID_PAYLOAD_LEN; + *v_gs_rsp_pld_len = UNF_RFTID_RSP_PAYLOAD_LEN; + break; + + case NS_RFF_ID: + *v_gs_pld_len = UNF_RFFID_PAYLOAD_LEN; + *v_gs_rsp_pld_len = UNF_RFFID_RSP_PAYLOAD_LEN; + break; + case NS_GA_NXT: + *v_gs_pld_len = UNF_GID_PAYLOAD_LEN; + *v_gs_rsp_pld_len = UNF_GID_ACC_PAYLOAD_LEN; + break; + + case NS_GIEL: + *v_gs_pld_len = 
UNF_RFTID_RSP_PAYLOAD_LEN; + *v_gs_rsp_pld_len = UNF_GID_ACC_PAYLOAD_LEN; + break; + + default: + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Unknown GS commond type(0x%x)", cmd_code); + return UNF_RETURN_ERROR; + } + + return RETURN_OK; +} + +static unsigned int hifc_send_gs_via_parent(void *v_hba, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned short ox_id, rx_id; + unsigned short cmd_code = UNF_ZERO; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int gs_pld_len = UNF_ZERO; + unsigned int gs_rsp_pld_len = UNF_ZERO; + void *gs_pld_addr = NULL; + struct hifc_hba_s *hba = NULL; + struct hifc_parent_sq_info_s *sq_info; + struct hifcoe_sqe_s sqe; + unsigned long long fram_phy_addr; + + hba = (struct hifc_hba_s *)v_hba; + + memset(&sqe, 0, sizeof(struct hifcoe_sqe_s)); + + sq_info = hifc_find_parent_sq_by_pkg(hba, v_pkg); + if (!sq_info) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Get NULL parent SQ information"); + + return ret; + } + + cmd_code = HIFC_GET_GS_CMND_CODE(v_pkg->cmnd); + + ret = hifc_get_gs_req_and_rsp_pld_len(cmd_code, &gs_pld_len, + &gs_rsp_pld_len); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) send GS SID(0x%x) DID(0x%x), get error GS request and response payload length", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return ret; + } + + gs_pld_addr = (void *)(HIFC_GET_CMND_PAYLOAD_ADDR(v_pkg)); + fram_phy_addr = v_pkg->unf_cmnd_pload_bl.buf_dma_addr + + sizeof(struct unf_fchead_s); + + if (cmd_code == NS_GID_FT || cmd_code == NS_GID_PT) + gs_pld_addr = (void *)(UNF_GET_GID_PAYLOAD(v_pkg)); + + /* Assemble the SQE Control Section part */ + hifc_build_service_wqe_ctrl_section( + &sqe.ctrl_sl, + HIFC_BYTES_TO_QW_NUM(HIFC_SQE_TS_SIZE), + HIFC_BYTES_TO_QW_NUM(sizeof(struct hifcoe_variable_sge_s))); + /* Assemble the SQE Task Section part */ + ox_id = UNF_GET_OXID(v_pkg) + hba->exit_base; + rx_id = UNF_GET_RXID(v_pkg); + hifc_build_service_wqe_ts_common(&sqe.ts_sl, + sq_info->rport_index, ox_id, + rx_id, HIFC_LSW(gs_pld_len)); + hifc_build_gs_wqe_ts_req(&sqe, UNF_GETXCHGALLOCTIME(v_pkg)); + + hifc_build_els_gs_wqe_sge(&sqe, gs_pld_addr, fram_phy_addr, gs_pld_len, + sq_info->context_id, v_hba); + + ret = hifc_parent_sq_enqueue(sq_info, &sqe); + + return ret; +} + +unsigned int hifc_send_gs_cmnd(void *v_hba, struct unf_frame_pkg_s *v_pkg) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct hifc_hba_s *hba = NULL; + struct hifc_parent_queue_info_s *prnt_qinfo = NULL; + + UNF_CHECK_VALID(0x4913, UNF_TRUE, v_hba, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x4914, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x4915, UNF_TRUE, UNF_GET_SFS_ENTRY(v_pkg), + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x4916, UNF_TRUE, HIFC_GET_CMND_PAYLOAD_ADDR(v_pkg), + return UNF_RETURN_ERROR); + + HIFC_CHECK_PKG_ALLOCTIME(v_pkg); + + hba = (struct hifc_hba_s *)v_hba; + prnt_qinfo = hifc_find_parent_queue_info_by_pkg(hba, v_pkg); + + if (!prnt_qinfo) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) send GS SID(0x%x) DID(0x%x), get a null parent queue information", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return ret; + } + + if (HIFC_RPORT_NOT_OFFLOADED(prnt_qinfo)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]Port(0x%x) send GS SID(0x%x) DID(0x%x), send GS Request before PLOGI", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + 
v_pkg->frame_head.rctl_did); + + return ret; + } + + ret = hifc_send_gs_via_parent(v_hba, v_pkg); + + return ret; +} + +static unsigned int hifc_get_bls_pld_len(struct unf_frame_pkg_s *v_pkg, + unsigned int *v_frame_len) +{ + unsigned int ret = RETURN_OK; + unsigned int rctl = 0; + + UNF_CHECK_VALID(0x4917, UNF_TRUE, v_frame_len, return UNF_RETURN_ERROR); + + rctl = UNF_GET_FC_HEADER_RCTL(&v_pkg->frame_head); + if (rctl == HIFC_RCTL_BLS_ACC) { + /* BA_ACC */ + *v_frame_len = sizeof(struct unf_ba_acc_s); + } else if (rctl == HIFC_RCTL_BLS_RJT) { + /* BA_RJT */ + *v_frame_len = sizeof(struct unf_ba_rjt_s); + } else { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[warn]PKG Rclt(0x%x) not BLS ACC or RJT", rctl); + + *v_frame_len = 0; + ret = UNF_RETURN_ERROR; + } + + return ret; +} + +static unsigned int hifc_send_bls_via_cmdq(struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int rctl = 0; + unsigned int bls_pld_len = 0; + unsigned short rx_id = INVALID_VALUE16; + unsigned short ox_id = INVALID_VALUE16; + unsigned short exch_id = INVALID_VALUE16; + unsigned char *bls_pld_addr = NULL; + union hifc_cmdqe_u cmdqe; + struct hifc_parent_sq_info_s *sq_info = NULL; + + sq_info = hifc_find_parent_sq_by_pkg(v_hba, v_pkg); + if (!sq_info) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[warn]Port(0x%x) send BLS SID_DID(0x%x_0x%x) with null parent queue information", + v_hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return UNF_RETURN_ERROR; + } + + /* Determine whether the value is ACC or RTJ and obtain the payload + * length of the ABTS_RSP + */ + ret = hifc_get_bls_pld_len(v_pkg, &bls_pld_len); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) cmdq send BLS PKG DID(0x%x) failed", + v_hba->port_index, v_pkg->frame_head.rctl_did); + + return UNF_RETURN_ERROR; + } + + rctl = UNF_GET_FC_HEADER_RCTL(&v_pkg->frame_head); + exch_id = (v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]) & 0xffff; + if ((exch_id == INVALID_VALUE16) && (rctl == HIFC_RCTL_BLS_ACC)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) cmdq send BA_ACC with error RXID(0xffff)", + v_hba->port_index); + + return UNF_RETURN_ERROR; + } + + /* + * FC-FS-3 15.3.3.1 Description: + * The OX_ID and RX_ID shall be set to match the Exchange in which + * the ABTS frame was transmitted. + */ + rx_id = UNF_GET_FC_HEADER_RXID(&v_pkg->frame_head); + ox_id = UNF_GET_FC_HEADER_OXID(&v_pkg->frame_head); + + if (exch_id != INVALID_VALUE16) { + exch_id = exch_id + v_hba->exit_base; + } else { + /* If the number is not an immediate number and the rxid is not + * allocated to the CM, the CM may correspond to the rjt. + */ + } + + memset(&cmdqe, 0, sizeof(cmdqe)); + hifc_build_cmdqe_common(&cmdqe, HIFC_CMDQE_ABTS_RSP, exch_id); + cmdqe.snd_abts_rsp.wd1.ox_id = ox_id; + cmdqe.snd_abts_rsp.wd1.port_id = v_hba->port_index; + cmdqe.snd_abts_rsp.wd1.payload_len = bls_pld_len; + cmdqe.snd_abts_rsp.wd1.rsp_type = ((rctl == HIFC_RCTL_BLS_ACC) ? 
0 : 1); + cmdqe.snd_abts_rsp.wd2.conn_id = sq_info->rport_index; + cmdqe.snd_abts_rsp.wd2.scqn = hifc_get_rport_maped_sts_scqn(v_hba, + sq_info->rport_index); + cmdqe.snd_abts_rsp.wd3.xid = sq_info->context_id; + cmdqe.snd_abts_rsp.wd4.cid = sq_info->cache_id; + cmdqe.snd_abts_rsp.wd5.req_rx_id = rx_id; + bls_pld_addr = HIFC_GET_RSP_PAYLOAD_ADDR(v_pkg); + memcpy(cmdqe.snd_abts_rsp.payload, bls_pld_addr, bls_pld_len); + + /* Send the ABTS_RSP command via ROOT CMDQ. */ + ret = hifc_root_cmdq_enqueue(v_hba, &cmdqe, sizeof(cmdqe.snd_abts_rsp)); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) RPort(0x%x) send ABTS_RSP OXID(0x%x) RXID(0x%x) EXCHID(0x%x)", + v_hba->port_cfg.port_id, sq_info->rport_index, ox_id, + rx_id, exch_id); + + return ret; +} + +static unsigned int hifc_send_bls_via_parent(struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = INVALID_VALUE16; + unsigned short rx_id = INVALID_VALUE16; + struct hifcoe_sqe_s sqe; + struct hifc_parent_sq_info_s *sq_info = NULL; + struct hifc_parent_queue_info_s *prnt_qinfo = NULL; + + UNF_CHECK_VALID(0x5015, UNF_TRUE, (v_pkg->type == UNF_PKG_BLS_REQ), + return UNF_RETURN_ERROR); + + memset(&sqe, 0, sizeof(struct hifcoe_sqe_s)); + + prnt_qinfo = hifc_find_parent_queue_info_by_pkg(v_hba, v_pkg); + if (!prnt_qinfo) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) send BLS SID_DID(0x%x_0x%x) with null parent queue information", + v_hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return ret; + } + + sq_info = hifc_find_parent_sq_by_pkg(v_hba, v_pkg); + if (!sq_info) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) send ABTS SID_DID(0x%x_0x%x) with null parent queue information", + v_hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return ret; + } + + rx_id = UNF_GET_RXID(v_pkg); + ox_id = UNF_GET_OXID(v_pkg) + v_hba->exit_base; + + /* Assemble the SQE Control Section part. + * The ABTS does not have Payload. bdsl=0 + */ + hifc_build_service_wqe_ctrl_section( + &sqe.ctrl_sl, + HIFC_BYTES_TO_QW_NUM(HIFC_SQE_TS_SIZE), 0); + + /* Assemble the SQE Task Section BLS Common part. 
The value of DW2 + * of BLS WQE is Rsvd, and the value of DW2 is 0 + */ + hifc_build_service_wqe_ts_common(&sqe.ts_sl, sq_info->rport_index, + ox_id, rx_id, 0); + + /* Assemble the special part of the ABTS */ + hifc_build_bls_wqe_ts_req(&sqe, v_pkg->frame_head.parameter, + UNF_GETXCHGALLOCTIME(v_pkg)); + + ret = hifc_parent_sq_enqueue(sq_info, &sqe); + + return ret; +} + +unsigned int hifc_send_bls_cmnd(void *v_hba, struct unf_frame_pkg_s *v_pkg) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct hifc_hba_s *hba = NULL; + unsigned long flag = 0; + struct hifc_parent_queue_info_s *prnt_qinfo = NULL; + + UNF_CHECK_VALID(0x4913, UNF_TRUE, v_hba, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x4914, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x4913, UNF_TRUE, UNF_PKG_BLS_REQ == v_pkg->type, + return UNF_RETURN_ERROR); + + HIFC_CHECK_PKG_ALLOCTIME(v_pkg); + hba = (struct hifc_hba_s *)v_hba; + + prnt_qinfo = hifc_find_parent_queue_info_by_pkg(hba, v_pkg); + if (!prnt_qinfo) { + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) send BLS SID_DID(0x%x_0x%x) with null parent queue information", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return ret; + } + + spin_lock_irqsave(&prnt_qinfo->parent_queue_state_lock, flag); + + if (HIFC_RPORT_OFFLOADED(prnt_qinfo)) { + spin_unlock_irqrestore(&prnt_qinfo->parent_queue_state_lock, + flag); + + /* INI: send ABTS_REQ via parent SQ */ + ret = hifc_send_bls_via_parent(hba, v_pkg); + + } else { + spin_unlock_irqrestore(&prnt_qinfo->parent_queue_state_lock, + flag); + + ret = hifc_send_bls_via_cmdq(hba, v_pkg); + } + + return ret; +} + +static unsigned int hifc_scq_rcv_flush_sq_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe) +{ + /* + * RCVD sq flush sts + * --->>> continue flush or clear done + */ + unsigned int ret = UNF_RETURN_ERROR; + + if (v_scqe->flush_sts.wd0.port_id != v_hba->port_index) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_EVENT, UNF_CRITICAL, + "[err]Port(0x%x) clear_sts_port_idx(0x%x) not match hba_port_idx(0x%x), stage(0x%x)", + v_hba->port_cfg.port_id, + v_scqe->clear_sts.wd0.port_id, + v_hba->port_index, + v_hba->q_set_stage); + + return UNF_RETURN_ERROR; + } + + if (v_scqe->flush_sts.wd0.last_flush) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EVENT, UNF_INFO, + "[info]Port(0x%x) flush sq(0x%x) done, stage(0x%x)", + v_hba->port_cfg.port_id, v_hba->next_clearing_sq, + v_hba->q_set_stage); + + /* If the Flush STS is last one, send cmd done */ + ret = hifc_clear_sq_wqe_done(v_hba); + } else { + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EVENT, UNF_MAJOR, + "[info]Port(0x%x) continue flush sq(0x%x), stage(0x%x)", + v_hba->port_cfg.port_id, v_hba->next_clearing_sq, + v_hba->q_set_stage); + + ret = hifc_clear_pending_sq_wqe(v_hba); + } + + return ret; +} + +static unsigned int hifc_scq_rcv_buf_clear_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe) +{ + /* + * clear: fetched sq wqe + * ---to--->>> pending sq wqe + */ + unsigned int ret = UNF_RETURN_ERROR; + + if (v_scqe->clear_sts.wd0.port_id != v_hba->port_index) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_EVENT, UNF_CRITICAL, + "[err]Port(0x%x) clear_sts_port_idx(0x%x) not match hba_port_idx(0x%x), stage(0x%x)", + v_hba->port_cfg.port_id, + v_scqe->clear_sts.wd0.port_id, + v_hba->port_index, + v_hba->q_set_stage); + + return UNF_RETURN_ERROR; + } + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EVENT, UNF_KEVENT, + "[info]Port(0x%x) cleared all fetched wqe, start clear sq pending 
wqe, stage (0x%x)", + v_hba->port_cfg.port_id, v_hba->q_set_stage); + + v_hba->q_set_stage = HIFC_QUEUE_SET_STAGE_FLUSHING; + ret = hifc_clear_pending_sq_wqe(v_hba); + + return ret; +} + +static unsigned int hifc_scq_rcv_sess_rst_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe) +{ + unsigned int rport_index = INVALID_VALUE32; + unsigned long flag = 0; + struct hifc_parent_queue_info_s *parent_queue_info = NULL; + struct hifcoe_scqe_sess_sts_s *sess_sts = + (struct hifcoe_scqe_sess_sts_s *)(void *)v_scqe; + unsigned int ctx_flush_done; + unsigned int *ctx_dw = NULL; + int ret; + + rport_index = sess_sts->wd1.conn_id; + if (rport_index >= UNF_HIFC_MAXRPORT_NUM) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) receive reset session cmd sts failed, invalid rport_index(0x%x) status_code(0x%x) remain_cnt(0x%x)", + v_hba->port_cfg.port_id, + rport_index, + sess_sts->ch.wd0.err_code, + sess_sts->ch.wd0.cqe_remain_cnt); + + return UNF_RETURN_ERROR; + } + + parent_queue_info = + &v_hba->parent_queue_mgr->parent_queues[rport_index]; + + /* + * If only session reset is used, the offload status of sq remains + * unchanged. If a link is deleted, the offload status is set to + * destroying and is irreversible. + */ + spin_lock_irqsave(&parent_queue_info->parent_queue_state_lock, flag); + + /* + * According to the fault tolerance principle, even if the connection + * deletion times out and the sts returns to delete the connection, one + * indicates that the cancel timer is successful, and 0 indicates that + * the timer is being processed. + */ + if (!cancel_delayed_work( + &parent_queue_info->parent_sq_info.del_work)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) rport_index(0x%x) delete rport timer maybe timeout", + v_hba->port_cfg.port_id, + rport_index); + } + + /* + * If the SessRstSts is returned too late and the Parent Queue Info + * resource is released, OK is returned.
+ */ + if (parent_queue_info->offload_state != HIFC_QUEUE_STATE_DESTROYING) { + spin_unlock_irqrestore( + &parent_queue_info->parent_queue_state_lock, flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[info]Port(0x%x) reset session cmd complete, no need to free parent qinfo, rport_index(0x%x) status_code(0x%x) remain_cnt(0x%x)", + v_hba->port_cfg.port_id, + rport_index, + sess_sts->ch.wd0.err_code, + sess_sts->ch.wd0.cqe_remain_cnt); + + return RETURN_OK; + } + + if (parent_queue_info->parent_ctx.cqm_parent_ctx_obj) { + ctx_dw = (unsigned int *)((void *)(parent_queue_info->parent_ctx.cqm_parent_ctx_obj->vaddr)); + ctx_flush_done = ctx_dw[HIFC_CTXT_FLUSH_DONE_DW_POS] & + HIFC_CTXT_FLUSH_DONE_MASK_BE; + /* memory barr */ + mb(); + if (ctx_flush_done == 0) { + spin_unlock_irqrestore( + &parent_queue_info->parent_queue_state_lock, + flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) rport(0x%x) flushdone is not set, delay to free parent session", + v_hba->port_cfg.port_id, rport_index); + + /* If flushdone bit is not set,delay free Sq info */ + ret = queue_delayed_work( + v_hba->work_queue, + &parent_queue_info->parent_sq_info.flush_done_tmo_work, + (unsigned long) + msecs_to_jiffies((unsigned int) + HIFC_SQ_WAIT_FLUSH_DONE_TIMEOUT_MS)); + if (ret == (int)false) { + HIFC_HBA_STAT( + v_hba, + HIFC_STAT_PARENT_SQ_QUEUE_DELAYED_WORK); + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) rport(0x%x) queue delayed work failed iret:%d", + v_hba->port_cfg.port_id, + rport_index, ret); + } + + return RETURN_OK; + } + } + + spin_unlock_irqrestore(&parent_queue_info->parent_queue_state_lock, + flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) begin to free parent session with rport_index(0x%x)", + v_hba->port_cfg.port_id, + rport_index); + + hifc_free_parent_queue_info(v_hba, parent_queue_info); + + return RETURN_OK; +} + +static unsigned int hifc_scq_rcv_clear_srq_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe) +{ + /* + * clear ELS/Immi SRQ + * ---then--->>> Destroy SRQ + */ + + struct hifc_hba_s *hba = v_hba; + struct hifc_srq_info_s *srq_info = NULL; + + if (HIFC_GET_SCQE_STATUS(v_scqe) != 0) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) clear srq failed, status(0x%x)", + v_hba->port_cfg.port_id, + HIFC_GET_SCQE_STATUS(v_scqe)); + + return RETURN_OK; + } + + srq_info = &hba->els_srq_info; + + /* + * 1: cancel timer succeed + * 0: the timer is being processed, the SQ is released when the timer + * times out + */ + if (cancel_delayed_work(&srq_info->del_work)) { + /* + * not free srq resource, it will be freed on hba remove + */ + srq_info->state = HIFC_CLEAN_DONE; + } + + return RETURN_OK; +} + +static unsigned int hifc_scq_rcv_marker_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int ox_id = INVALID_VALUE32; + unsigned int rx_id = INVALID_VALUE32; + struct unf_frame_pkg_s pkg = { 0 }; + struct hifcoe_scqe_itmf_marker_sts_s *marker_sts = NULL; + + marker_sts = &v_scqe->itmf_marker_sts; + ox_id = (unsigned int)marker_sts->wd1.ox_id; + ox_id = ox_id - v_hba->exit_base; + rx_id = (unsigned int)marker_sts->wd1.rx_id; + pkg.frame_head.oxid_rxid = rx_id | (unsigned int)(ox_id) << 16; + + pkg.frame_head.csctl_sid = marker_sts->wd3.sid; + pkg.frame_head.rctl_did = marker_sts->wd2.did; + + /* 1. 
set pkg status */ + if (unlikely(HIFC_SCQE_HAS_ERRCODE(v_scqe))) + pkg.status = UNF_IO_FAILED; + else + pkg.status = UNF_IO_SUCCESS; + + /* 2 .process rcvd marker STS: set exchange state */ + ret = hifc_rcv_tmf_marker_sts(v_hba, &pkg, ox_id); + + return ret; +} + +static unsigned int hifc_scq_rcv_abts_marker_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int ox_id = INVALID_VALUE32; + unsigned int rx_id = INVALID_VALUE32; + struct unf_frame_pkg_s pkg = { 0 }; + + struct hifcoe_scqe_abts_marker_sts_s *abts_sts = NULL; + + abts_sts = &v_scqe->abts_marker_sts; + if (!abts_sts) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]ABTS marker STS is NULL"); + return ret; + } + + ox_id = (unsigned int)abts_sts->wd1.ox_id; + ox_id = ox_id - v_hba->exit_base; + rx_id = (unsigned int)abts_sts->wd1.rx_id; + pkg.frame_head.oxid_rxid = rx_id | (unsigned int)(ox_id) << 16; + pkg.frame_head.csctl_sid = abts_sts->wd3.sid; + pkg.frame_head.rctl_did = abts_sts->wd2.did; + /* abts marker abts_maker_status as ucode stat */ + pkg.abts_maker_status = (unsigned int)abts_sts->wd3.io_state; + + if (unlikely(HIFC_SCQE_HAS_ERRCODE(v_scqe))) + pkg.status = UNF_IO_FAILED; + else + pkg.status = UNF_IO_SUCCESS; + + ret = hifc_rcv_abts_marker_sts(v_hba, &pkg, ox_id); + + return ret; +} + +unsigned int hifc_handle_aeq_offload_err(struct hifc_hba_s *v_hba, + struct hifcoe_aqe_data_s *v_aeq_msg) +{ + unsigned int ret = RETURN_OK; + struct hifcoe_aqe_data_s *aeq_msg; + unsigned int rport_index = 0; + unsigned int local_ctx_id = 0; + struct hifc_parent_queue_info_s *prnt_qinfo = NULL; + struct hifc_destroy_ctrl_info_s destroy_sqe_info = { 0 }; + unsigned long flag = 0; + + aeq_msg = v_aeq_msg; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) receive off_load Err Event, EvtCode(0x%x) Conn_id(0x%x) Xid(0x%x)", + v_hba->port_cfg.port_id, aeq_msg->wd0.evt_code, + aeq_msg->wd0.conn_id, aeq_msg->wd1.xid); + + /* Currently, only the offload failure caused by insufficient scqe is + * processed. Other errors are not processed temporarily. 
+ */ + if (unlikely(aeq_msg->wd0.evt_code != + FCOE_ERROR_OFFLOAD_LACKOF_SCQE_FAIL)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) receive an unsupported error code of AEQ Event, EvtCode(0x%x) Conn_id(0x%x)", + v_hba->port_cfg.port_id, aeq_msg->wd0.evt_code, + aeq_msg->wd0.conn_id); + + return UNF_RETURN_ERROR; + } + HIFC_SCQ_ERR_TYPE_STAT(v_hba, FCOE_ERROR_OFFLOAD_LACKOF_SCQE_FAIL); + + rport_index = aeq_msg->wd0.conn_id; + local_ctx_id = aeq_msg->wd1.xid; + + if (rport_index >= UNF_HIFC_MAXRPORT_NUM) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) receive an error offload status: rport index(0x%x) is invalid, Xid(0x%x)", + v_hba->port_cfg.port_id, rport_index, + aeq_msg->wd1.xid); + + return UNF_RETURN_ERROR; + } + + prnt_qinfo = &v_hba->parent_queue_mgr->parent_queues[rport_index]; + if (hifc_check_rport_is_valid(prnt_qinfo, local_ctx_id) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) receive an error offload status: rport index(0x%x), context id(0x%x) is invalid", + v_hba->port_cfg.port_id, rport_index, local_ctx_id); + + return UNF_RETURN_ERROR; + } + + spin_lock_irqsave(&prnt_qinfo->parent_queue_state_lock, flag); + + /* The offload status is restored only + * when the offload status is offloading + */ + if (prnt_qinfo->offload_state == HIFC_QUEUE_STATE_OFFLOADING) + prnt_qinfo->offload_state = HIFC_QUEUE_STATE_INITIALIZED; + + spin_unlock_irqrestore(&prnt_qinfo->parent_queue_state_lock, flag); + + if (prnt_qinfo->parent_sq_info.destroy_sqe.valid == UNF_TRUE) { + destroy_sqe_info.valid = + prnt_qinfo->parent_sq_info.destroy_sqe.valid; + destroy_sqe_info.rport_index = + prnt_qinfo->parent_sq_info.destroy_sqe.rport_index; + destroy_sqe_info.time_out = + prnt_qinfo->parent_sq_info.destroy_sqe.time_out; + destroy_sqe_info.start_jiff = + prnt_qinfo->parent_sq_info.destroy_sqe.start_jiff; + + destroy_sqe_info.rport_info.nport_id = + prnt_qinfo->parent_sq_info.destroy_sqe.rport_info.nport_id; + + destroy_sqe_info.rport_info.rport_index = + prnt_qinfo->parent_sq_info.destroy_sqe.rport_info.rport_index; + + destroy_sqe_info.rport_info.port_name = + prnt_qinfo->parent_sq_info.destroy_sqe.rport_info.port_name; + + prnt_qinfo->parent_sq_info.destroy_sqe.valid = UNF_FALSE; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) pop up delay destroy parent sq, sqe start time 0x%llx, timeout value 0x%x, rport index 0x%x, offload state 0x%x", + v_hba->port_cfg.port_id, + destroy_sqe_info.start_jiff, + destroy_sqe_info.time_out, + prnt_qinfo->parent_sq_info.destroy_sqe.rport_info.rport_index, + HIFC_QUEUE_STATE_INITIALIZED); + + ret = hifc_free_parent_resource(v_hba, + &destroy_sqe_info.rport_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) pop delay destroy parent sq failed, rport index 0x%x, rport nport id 0x%x", + v_hba->port_cfg.port_id, + destroy_sqe_info.rport_info.rport_index, + destroy_sqe_info.rport_info.nport_id); + } + } + + return ret; +} diff --git a/drivers/scsi/huawei/hifc/hifc_service.h b/drivers/scsi/huawei/hifc/hifc_service.h new file mode 100644 index 000000000000..c810cc7e64cb --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_service.h @@ -0,0 +1,248 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef __HIFC_SERVICE_H__ +#define __HIFC_SERVICE_H__ + +/* 
Send ElsCmnd or ElsRsp */ +unsigned int hifc_send_els_cmnd(void *phba, struct unf_frame_pkg_s *v_pkg); + +/* Send GsCmnd */ +unsigned int hifc_send_gs_cmnd(void *v_hba, struct unf_frame_pkg_s *v_pkg); + +/* Send BlsCmnd */ +unsigned int hifc_send_bls_cmnd(void *v_hba, struct unf_frame_pkg_s *v_pkg); + +/* Receive Frame from Root RQ */ +unsigned int hifc_rcv_service_frame_from_rq( + struct hifc_hba_s *v_hba, + struct hifc_root_rq_info_s *rq_info, + struct hifc_root_rq_complet_info_s *v_complet_info, + unsigned short v_rcv_buf_num); + +unsigned int hifc_rq_rcv_srv_err(struct hifc_hba_s *v_hba, + struct hifc_root_rq_complet_info_s *v_info); + +unsigned int hifc_rq_rcv_els_rsp_sts( + struct hifc_hba_s *v_hba, + struct hifc_root_rq_complet_info_s *v_info); + +/* Receive Frame from SCQ */ +unsigned int hifc_rcv_scqe_entry_from_scq(void *v_hba, void *v_scqe, + unsigned int scq_idx); + +/* FC txmfs */ +#define HIFC_DEFAULT_TX_MAX_FREAM_SIZE 256 + +#define HIFC_FIRST_PKG_FLAG (1 << 0) +#define HIFC_LAST_PKG_FLAG (1 << 1) + +#define HIFC_CHECK_IF_FIRST_PKG(pkg_flag) ((pkg_flag) & HIFC_FIRST_PKG_FLAG) +#define HIFC_CHECK_IF_LAST_PKG(pkg_flag) ((pkg_flag) & HIFC_LAST_PKG_FLAG) + +#define HIFC_GET_SERVICE_TYPE(v_hba) 12 +#define HIFC_GET_PACKET_TYPE(v_service_type) 1 +#define HIFC_GET_PACKET_COS(v_service_type) 1 +#define HIFC_GET_PRLI_PAYLOAD_LEN \ + (UNF_PRLI_PAYLOAD_LEN - UNF_PRLI_SIRT_EXTRA_SIZE) +/* Start addr of the header/payloed of the cmnd buffer in the pkg */ +#define HIFC_FC_HEAD_LEN (sizeof(struct unf_fchead_s)) +#define HIFC_PAYLOAD_OFFSET (sizeof(struct unf_fchead_s)) +#define HIFC_GET_CMND_PAYLOAD_ADDR(v_pkg) \ + UNF_GET_FLOGI_PAYLOAD(v_pkg) +#define HIFC_GET_CMND_HEADER_ADDR(v_pkg) \ + ((v_pkg)->unf_cmnd_pload_bl.buffer_ptr) +#define HIFC_GET_RSP_HEADER_ADDR(v_pkg) \ + ((v_pkg)->unf_rsp_pload_bl.buffer_ptr) +#define HIFC_GET_RSP_PAYLOAD_ADDR(v_pkg) \ + ((v_pkg)->unf_rsp_pload_bl.buffer_ptr + HIFC_PAYLOAD_OFFSET) +#define HIFC_GET_CMND_FC_HEADER(v_pkg) \ + (&(UNF_GET_SFS_ENTRY(v_pkg)->sfs_common.frame_head)) +#define HIFC_PKG_IS_ELS_RSP(els_cmnd_type) \ + (((els_cmnd_type) == ELS_ACC) || ((els_cmnd_type) == ELS_RJT)) +#define HIFC_XID_IS_VALID(xid, exi_base, exi_count) \ + (((xid) >= (exi_base)) && ((xid) < ((exi_base) + (exi_count)))) + +#define UNF_FC_PAYLOAD_ELS_MASK 0xFF000000 +#define UNF_FC_PAYLOAD_ELS_SHIFT 24 +#define UNF_FC_PAYLOAD_ELS_DWORD 0 + +/* Note: this pfcpayload is little endian */ +#define UNF_GET_FC_PAYLOAD_ELS_CMND(pfcpayload) \ + UNF_GET_SHIFTMASK(((unsigned int *)(void *)pfcpayload)\ + [UNF_FC_PAYLOAD_ELS_DWORD], \ + UNF_FC_PAYLOAD_ELS_SHIFT, UNF_FC_PAYLOAD_ELS_MASK) + +#define HIFC_ELS_CMND_MASK 0xffff +#define HIFC_ELS_CMND__RELEVANT_SHIFT 16UL +#define HIFC_GET_ELS_CMND_CODE(__cmnd) \ + ((unsigned short)((__cmnd) & HIFC_ELS_CMND_MASK)) +#define HIFC_GET_ELS_RSP_TYPE(__cmnd) \ + ((unsigned short)((__cmnd) & HIFC_ELS_CMND_MASK)) +#define HIFC_GET_ELS_RSP_CODE(__cmnd) \ + ((unsigned short)((__cmnd) >> HIFC_ELS_CMND__RELEVANT_SHIFT & \ + HIFC_ELS_CMND_MASK)) +#define HIFC_GET_GS_CMND_CODE(__cmnd) \ + ((unsigned short)((__cmnd) & HIFC_ELS_CMND_MASK)) + +/* ELS CMND Request */ +#define ELS_CMND 0 + +/* fh_f_ctl - Frame control flags. 
*/ +#define HIFC_FC_EX_CTX (1 << 23) /* sent by responder to exchange */ +#define HIFC_FC_SEQ_CTX (1 << 22) /* sent by responder to sequence */ +#define HIFC_FC_FIRST_SEQ (1 << 21) /* first sequence of this exchange */ +#define HIFC_FC_LAST_SEQ (1 << 20) /* last sequence of this exchange */ +#define HIFC_FC_END_SEQ (1 << 19) /* last frame of sequence */ +#define HIFC_FC_END_CONN (1 << 18) /* end of class 1 connection pending */ +#define HIFC_FC_RES_B17 (1 << 17) /* reserved */ +#define HIFC_FC_SEQ_INIT (1 << 16) /* transfer of sequence initiative */ +#define HIFC_FC_X_ID_REASS (1 << 15) /* exchange ID has been changed */ +#define HIFC_FC_X_ID_INVAL (1 << 14) /* exchange ID invalidated */ +#define HIFC_FC_ACK_1 (1 << 12) /* 13:12 = 1: ACK_1 expected */ +#define HIFC_FC_ACK_N (2 << 12) /* 13:12 = 2: ACK_N expected */ +#define HIFC_FC_ACK_0 (3 << 12) /* 13:12 = 3: ACK_0 expected */ +#define HIFC_FC_RES_B11 (1 << 11) /* reserved */ +#define HIFC_FC_RES_B10 (1 << 10) /* reserved */ +#define HIFC_FC_RETX_SEQ (1 << 9) /* retransmitted sequence */ +#define HIFC_FC_UNI_TX (1 << 8) /* unidirectional transmit (class 1) */ +#define HIFC_FC_CONT_SEQ(i) ((i) << 6) +#define HIFC_FC_ABT_SEQ(i) ((i) << 4) +#define HIFC_FC_REL_OFF (1 << 3) /* parameter is relative offset */ +#define HIFC_FC_RES2 (1 << 2) /* reserved */ +#define HIFC_FC_FILL(i) ((i) & 3) /* 1:0: bytes of trailing fill */ + +#define HIFC_FCTL_REQ (HIFC_FC_FIRST_SEQ | HIFC_FC_END_SEQ |\ + HIFC_FC_SEQ_INIT) +#define HIFC_FCTL_RESP (HIFC_FC_EX_CTX | HIFC_FC_LAST_SEQ | \ + HIFC_FC_END_SEQ | HIFC_FC_SEQ_INIT) +#define HIFC_RCTL_BLS_REQ 0x81 +#define HIFC_RCTL_BLS_ACC 0x84 +#define HIFC_RCTL_BLS_RJT 0x85 + +#define UNF_IO_STATE_NEW 0 +#define TGT_IO_STATE_SEND_XFERRDY (1 << 2) +#define TGT_IO_STATE_RSP (1 << 5) +#define TGT_IO_STATE_ABORT (1 << 7) + +enum HIFC_FC_FH_TYPE_E { + HIFC_FC_TYPE_BLS = 0x00, /* basic link service */ + HIFC_FC_TYPE_ELS = 0x01, /* extended link service */ + HIFC_FC_TYPE_IP = 0x05, /* IP over FC, RFC 4338 */ + HIFC_FC_TYPE_FCP = 0x08, /* SCSI FCP */ + HIFC_FC_TYPE_CT = 0x20, /* Fibre Channel Services (FC-CT) */ + HIFC_FC_TYPE_ILS = 0x22 /* internal link service */ +}; + +enum HIFC_FC_FH_RCTL_E { + HIFC_FC_RCTL_DD_UNCAT = 0x00, /* uncategorized information */ + HIFC_FC_RCTL_DD_SOL_DATA = 0x01, /* solicited data */ + HIFC_FC_RCTL_DD_UNSOL_CTL = 0x02, /* unsolicited control */ + HIFC_FC_RCTL_DD_SOL_CTL = 0x03, /* solicited control or reply */ + HIFC_FC_RCTL_DD_UNSOL_DATA = 0x04, /* unsolicited data */ + HIFC_FC_RCTL_DD_DATA_DESC = 0x05, /* data descriptor */ + HIFC_FC_RCTL_DD_UNSOL_CMD = 0x06, /* unsolicited command */ + HIFC_FC_RCTL_DD_CMD_STATUS = 0x07, /* command status */ + +#define HIFC_FC_RCTL_ILS_REQ HIFC_FC_RCTL_DD_UNSOL_CTL /* ILS request */ +#define HIFC_FC_RCTL_ILS_REP HIFC_FC_RCTL_DD_SOL_CTL /* ILS reply */ + + /* + * Extended Link_Data + */ + HIFC_FC_RCTL_ELS_REQ = 0x22, /* extended link services request */ + HIFC_FC_RCTL_ELS_RSP = 0x23, /* extended link services reply */ + HIFC_FC_RCTL_ELS4_REQ = 0x32, /* FC-4 ELS request */ + HIFC_FC_RCTL_ELS4_RSP = 0x33, /* FC-4 ELS reply */ + /* + * Optional Extended Headers + */ + HIFC_FC_RCTL_VFTH = 0x50, /* virtual fabric tagging header */ + HIFC_FC_RCTL_IFRH = 0x51, /* inter-fabric routing header */ + HIFC_FC_RCTL_ENCH = 0x52, /* encapsulation header */ + /* + * Basic Link Services fh_r_ctl values. 
+ */ + HIFC_FC_RCTL_BA_NOP = 0x80, /* basic link service NOP */ + HIFC_FC_RCTL_BA_ABTS = 0x81, /* basic link service abort */ + HIFC_FC_RCTL_BA_RMC = 0x82, /* remove connection */ + HIFC_FC_RCTL_BA_ACC = 0x84, /* basic accept */ + HIFC_FC_RCTL_BA_RJT = 0x85, /* basic reject */ + HIFC_FC_RCTL_BA_PRMT = 0x86, /* dedicated connection preempted */ + /* + * Link Control Information. + */ + HIFC_FC_RCTL_ACK_1 = 0xc0, /* acknowledge_1 */ + HIFC_FC_RCTL_ACK_0 = 0xc1, /* acknowledge_0 */ + HIFC_FC_RCTL_P_RJT = 0xc2, /* port reject */ + HIFC_FC_RCTL_F_RJT = 0xc3, /* fabric reject */ + HIFC_FC_RCTL_P_BSY = 0xc4, /* port busy */ + HIFC_FC_RCTL_F_BSY = 0xc5, /* fabric busy to data frame */ + HIFC_FC_RCTL_F_BSYL = 0xc6, /* fabric busy to link control frame */ + HIFC_FC_RCTL_LCR = 0xc7, /* link credit reset */ + HIFC_FC_RCTL_END = 0xc9 /* end */ +}; + +struct hifc_fc_frame_header { + unsigned char rctl; /* routing control */ + unsigned char did[3]; /* Destination ID */ + + unsigned char cs_ctl; /* class of service control / pri */ + unsigned char sid[3]; /* Source ID */ + + unsigned char type; /* see enum fc_fh_type below */ + unsigned char frame_ctl[3]; /* frame control */ + + unsigned char seq_id; /* sequence ID */ + unsigned char df_ctl; /* data field control */ + unsigned short seq_cnt; /* sequence count */ + + unsigned short ox_id; /* originator exchange ID */ + unsigned short rx_id; /* responder exchange ID */ + unsigned int parm_offset; /* parameter or relative offset */ +}; + +unsigned int hifc_rcv_els_cmnd(const struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned char *v_pld, + unsigned int pld_len, + int first_frame); +unsigned int hifc_rcv_els_rsp(const struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned int ox_id); +unsigned int hifc_rcv_els_rsp_sts(const struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned int rx_id); +unsigned int hifc_rcv_gs_rsp(const struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned int ox_id); +unsigned int hifc_rcv_bls_rsp(const struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned int ox_id); + +void hifc_save_login_para_in_sq_info( + struct hifc_hba_s *v_hba, + struct unf_port_login_parms_s *v_login_coparms); +unsigned int hifc_handle_aeq_offload_err(struct hifc_hba_s *v_hba, + struct hifcoe_aqe_data_s *v_aeg_msg); + +#define HIFC_CHECK_PKG_ALLOCTIME(v_pkg) \ + do { \ + if (unlikely(UNF_GETXCHGALLOCTIME(v_pkg) == 0)) { \ + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, \ + UNF_WARN, \ + "[warn]Invalid MagicNum,S_ID(0x%x) D_ID(0x%x) OXID(0x%x) RX_ID(0x%x) pkg type(0x%x) hot pooltag(0x%x)", \ + UNF_GET_SID(v_pkg), \ + UNF_GET_DID(v_pkg), \ + UNF_GET_OXID(v_pkg), \ + UNF_GET_RXID(v_pkg), \ + ((struct unf_frame_pkg_s *)v_pkg)->type, \ + UNF_GET_XCHG_TAG(v_pkg)); \ + } \ + } while (0) + +#endif + diff --git a/drivers/scsi/huawei/hifc/unf_disc.c b/drivers/scsi/huawei/hifc/unf_disc.c new file mode 100644 index 000000000000..12d8514af959 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_disc.c @@ -0,0 +1,1320 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#include "unf_log.h" +#include "unf_common.h" +#include "unf_disc.h" +#include "unf_event.h" +#include "unf_lport.h" +#include "unf_rport.h" +#include "unf_exchg.h" +#include "unf_service.h" +#include "unf_portman.h" + +#define UNF_LIST_RSCN_PAGE_CNT 2560 +#define UNF_MAX_PORTS_PRI_LOOP 2 +#define UNF_MAX_GS_SEND_NUM 8 +#define 
UNF_OS_REMOVE_CARD_TIMEOUT (60 * 1000) + +static void unf_set_disc_state(struct unf_disc_s *v_disc, + enum unf_disc_state_e v_en_states) +{ + UNF_CHECK_VALID(0x651, UNF_TRUE, v_disc, return); + + if (v_en_states != v_disc->en_states) { + /* Reset disc retry count */ + v_disc->retry_count = 0; + } + + v_disc->en_states = v_en_states; +} + +static inline unsigned int unf_get_loop_map(struct unf_lport_s *v_lport, + unsigned char v_loop_map[], + unsigned int loop_map_size) +{ + struct unf_buf_s buf = { 0 }; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID( + 0x652, UNF_TRUE, + v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get, + return UNF_RETURN_ERROR); + + buf.cbuf = v_loop_map; + buf.buf_len = loop_map_size; + + ret = v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get( + v_lport->fc_port, + UNF_PORT_CFG_GET_LOOP_MAP, + (void *)&buf); + return ret; +} + +static int unf_discover_private_loop(void *v_arg_in, void *v_arg_out) +{ + struct unf_lport_s *lport = (struct unf_lport_s *)v_arg_in; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int i = 0; + unsigned char loop_id = 0; + unsigned int alpa_index = 0; + unsigned char loop_map[UNF_LOOPMAP_COUNT]; + + UNF_REFERNCE_VAR(v_arg_out); + UNF_CHECK_VALID(0x653, UNF_TRUE, lport, return UNF_RETURN_ERROR); + memset(loop_map, 0x0, UNF_LOOPMAP_COUNT); + + /* Get Port Loop Map */ + ret = unf_get_loop_map(lport, loop_map, UNF_LOOPMAP_COUNT); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) get loop map failed", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + /* Check Loop Map Ports Count */ + if (loop_map[0] > UNF_MAX_PORTS_PRI_LOOP) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) has more than %d ports(%u) in private loop", + lport->port_id, UNF_MAX_PORTS_PRI_LOOP, + loop_map[0]); + + return UNF_RETURN_ERROR; + } + + /* AL_PA = 0 means Public Loop */ + if ((loop_map[1] == UNF_FL_PORT_LOOP_ADDR) || + (loop_map[2] == UNF_FL_PORT_LOOP_ADDR)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) one or more AL_PA is 0x00, indicate it's FL_Port", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + /* Discovery Private Loop Ports */ + for (i = 0; i < loop_map[0]; i++) { + alpa_index = i + 1; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) start to disc(0x%x) with count(0x%x)", + lport->port_id, loop_map[alpa_index], i); + + /* Check whether need delay to send PLOGI or not */ + loop_id = loop_map[alpa_index]; + unf_login_with_loop_node(lport, (unsigned int)loop_id); + } + + return RETURN_OK; +} + +static unsigned int unf_disc_start(void *v_lport) +{ + /* + * Call by: + * 1. Enter Private Loop Login + * 2. Analysis RSCN payload + * 3. 
SCR callback + ** + * Doing: + * Fabric/Public Loop: Send GID_PT + * Private Loop: (delay to) send PLOGI or send LOGO immediately + * P2P: do nothing + */ + struct unf_lport_s *lport = (struct unf_lport_s *)v_lport; + struct unf_rport_s *rport = NULL; + struct unf_disc_s *disc = NULL; + struct unf_cm_event_report *event = NULL; + unsigned int ret = RETURN_OK; + unsigned long flag = 0; + enum unf_act_topo_e act_topo = UNF_ACT_TOP_UNKNOWN; + + UNF_CHECK_VALID(0x654, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + act_topo = lport->en_act_topo; + disc = &lport->disc; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]LOGIN: Port(0x%x) with topo(0x%x) begin to discovery", + lport->port_id, act_topo); + + if ((act_topo == UNF_ACT_TOP_P2P_FABRIC) || + (act_topo == UNF_ACT_TOP_PUBLIC_LOOP)) { + /* 1. Fabric or Public Loop Topology: for directory server */ + /* 0xfffffc */ + rport = unf_get_rport_by_nport_id(lport, + UNF_FC_FID_DIR_SERV); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) unable to get SNS RPort(0xfffffc)", + lport->port_id); + + rport = unf_rport_get_free_and_init( + lport, + UNF_PORT_TYPE_FC, + UNF_FC_FID_DIR_SERV); + if (!rport) + return UNF_RETURN_ERROR; + rport->nport_id = UNF_FC_FID_DIR_SERV; + } + + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + unf_set_disc_state(disc, UNF_DISC_ST_START); /* disc start */ + unf_disc_state_ma(lport, UNF_EVENT_DISC_NORMAL_ENTER); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + /* + * NOTE: Send GID_PT + * The Name Server shall, when it receives a GID_PT request, + * return all Port Identifiers having registered support for + * the specified Port Type. + * One or more Port Identifiers, having registered as + * the specified Port Type, are returned. 
 */ + ret = unf_send_gid_pt(lport, rport); + if (ret != RETURN_OK) + unf_disc_error_recovery(lport); + } else if (act_topo == UNF_ACT_TOP_PRIVATE_LOOP) { + /* Private Loop: to thread process */ + event = unf_get_one_event_node(lport); + UNF_CHECK_VALID(0x655, UNF_TRUE, NULL != event, + return UNF_RETURN_ERROR); + + event->lport = lport; + event->event_asy_flag = UNF_EVENT_ASYN; + event->pfn_unf_event_task = unf_discover_private_loop; + event->para_in = (void *)lport; + + unf_post_one_event_node(lport, event); + } else { + /* P2P topology mode: Do nothing */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port(0x%x) with topo(0x%x) need to do nothing", + lport->port_id, act_topo); + } + + return ret; +} + +static unsigned int unf_disc_stop(void *v_lport) +{ + /* Call by GID_ACC processor */ + struct unf_lport_s *lport = NULL; + struct unf_lport_s *root_lport = NULL; + struct unf_rport_s *sns_port = NULL; + struct unf_disc_rport_s *disc_rport = NULL; + struct unf_disc_s *disc = NULL; + struct unf_disc_s *root_disc = NULL; + struct list_head *node = NULL; + unsigned long flag = 0; + unsigned int ret = RETURN_OK; + unsigned int nport_id = 0; + + UNF_CHECK_VALID(0x656, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + lport = (struct unf_lport_s *)v_lport; + disc = &lport->disc; + root_lport = (struct unf_lport_s *)lport->root_lport; + root_disc = &root_lport->disc; + + /* Get R_Port for Directory server */ + /* 0xfffffc */ + sns_port = unf_get_rport_by_nport_id(lport, UNF_FC_FID_DIR_SERV); + if (!sns_port) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) find fabric RPort(0xfffffc) failed", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + /* for R_Port from disc pool busy list */ + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + if (list_empty(&disc->disc_rport_mgr.list_disc_rport_busy)) { + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + /* Empty and return directly */ + return RETURN_OK; + } + + node = (&disc->disc_rport_mgr.list_disc_rport_busy)->next; + do { + /* Delete from Disc busy list */ + disc_rport = list_entry(node, struct unf_disc_rport_s, + entry_rport); + nport_id = disc_rport->nport_id; + list_del_init(node); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + /* Add back to (free) Disc R_Port pool (list) */ + spin_lock_irqsave(&root_disc->rport_busy_pool_lock, flag); + list_add_tail(node, + &root_disc->disc_rport_mgr.list_disc_rports_pool); + spin_unlock_irqrestore(&root_disc->rport_busy_pool_lock, flag); + + /* Send GNN_ID to Name Server */ + ret = unf_get_and_post_disc_event(lport, sns_port, nport_id, + UNF_DISC_GET_NODE_NAME); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, + UNF_ERR, + "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", + lport->nport_id, UNF_DISC_GET_NODE_NAME, + nport_id); + + /* NOTE: go to next stage */ + unf_rcv_gnn_id_rsp_unknown(lport, sns_port, + nport_id); + } + + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + node = (&disc->disc_rport_mgr.list_disc_rport_busy)->next; + + } while (node != &disc->disc_rport_mgr.list_disc_rport_busy); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + return ret; +} + +static void unf_disc_callback(void *v_lport, unsigned int v_result) +{ + /* Do nothing */ + UNF_REFERNCE_VAR(v_lport); + UNF_REFERNCE_VAR(v_result); +} + +/* + * Function Name : unf_init_rport_pool + * Function Description: Init R_Port (free) Pool + * Input Parameters : struct 
unf_lport_s *v_lport + * Output Parameters : N/A + * Return Type : unsigned int + */ +static unsigned int unf_init_rport_pool(struct unf_lport_s *v_lport) +{ + struct unf_rport_pool_s *rport_pool = NULL; + struct unf_rport_s *rport = NULL; + unsigned int ret = RETURN_OK; + unsigned int i = 0; + unsigned int bit_map_cnt = 0; + unsigned long flag = 0; + unsigned int max_login = 0; + + UNF_CHECK_VALID(0x657, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + /* Init RPort Pool info */ + rport_pool = &v_lport->rport_pool; + max_login = v_lport->low_level_func.lport_cfg_items.max_login; + rport_pool->rport_pool_completion = NULL; + rport_pool->rport_pool_count = max_login; + spin_lock_init(&rport_pool->rport_free_pool_lock); + INIT_LIST_HEAD(&rport_pool->list_rports_pool); /* free RPort pool */ + + /* 1. Alloc RPort Pool buffer/resource (memory) */ + rport_pool->rport_pool_add = + vmalloc((size_t)(max_login * sizeof(struct unf_rport_s))); + if (!rport_pool->rport_pool_add) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) allocate RPort(s) resource failed", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + memset(rport_pool->rport_pool_add, 0, + (max_login * sizeof(struct unf_rport_s))); + + /* 2. Alloc R_Port Pool bitmap */ + bit_map_cnt = (v_lport->low_level_func.support_max_rport) / + BITS_PER_LONG + 1; + rport_pool->pul_rpi_bitmap = vmalloc((size_t)(bit_map_cnt * + sizeof(unsigned long))); + if (!rport_pool->pul_rpi_bitmap) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) allocate RPort Bitmap failed", + v_lport->port_id); + + vfree(rport_pool->rport_pool_add); + rport_pool->rport_pool_add = NULL; + return UNF_RETURN_ERROR; + } + memset(rport_pool->pul_rpi_bitmap, 0, + (bit_map_cnt * sizeof(unsigned long))); + + /* 3. 
Rport resource Management: Add Rports (buffer) + * to Rport Pool List + */ + rport = (struct unf_rport_s *)(rport_pool->rport_pool_add); + spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag); + for (i = 0; i < rport_pool->rport_pool_count; i++) { + spin_lock_init(&rport->rport_state_lock); + list_add_tail(&rport->entry_rport, + &rport_pool->list_rports_pool); + sema_init(&rport->task_sema, 0); + rport++; + } + spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flag); + + return ret; +} + +static void unf_free_rport_pool(struct unf_lport_s *v_lport) +{ + struct unf_rport_pool_s *rport_pool = NULL; + int wait = UNF_FALSE; + unsigned long flag = 0; + unsigned int remain = 0; + unsigned long long time_out = 0; + unsigned int max_login = 0; + unsigned int i; + struct unf_rport_s *rport; + + struct completion rport_pool_completion = + COMPLETION_INITIALIZER(rport_pool_completion); + + UNF_CHECK_VALID(0x671, UNF_TRUE, v_lport, return); + UNF_REFERNCE_VAR(remain); + + rport_pool = &v_lport->rport_pool; + max_login = v_lport->low_level_func.lport_cfg_items.max_login; + + spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag); + if (max_login != rport_pool->rport_pool_count) { + rport_pool->rport_pool_completion = &rport_pool_completion; + remain = max_login - rport_pool->rport_pool_count; + wait = UNF_TRUE; + } + spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flag); + + if (wait == UNF_TRUE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) begin to wait for RPort pool completion(%ld), remain(0x%x)", + v_lport->port_id, jiffies, remain); + + time_out = wait_for_completion_timeout( + rport_pool->rport_pool_completion, + msecs_to_jiffies(UNF_OS_REMOVE_CARD_TIMEOUT)); + if (time_out == 0) + unf_cmmark_dirty_mem( + v_lport, + UNF_LPORT_DIRTY_FLAG_RPORT_POOL_DIRTY); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) wait for RPort pool completion end(%ld)", + v_lport->port_id, jiffies); + + spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag); + rport_pool->rport_pool_completion = NULL; + spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flag); + } + + rport = (struct unf_rport_s *)(rport_pool->rport_pool_add); + for (i = 0; i < rport_pool->rport_pool_count; i++) { + if (!rport) + break; + rport++; + } + + if ((v_lport->dirty_flag & + UNF_LPORT_DIRTY_FLAG_RPORT_POOL_DIRTY) == 0) { + vfree(rport_pool->rport_pool_add); + rport_pool->rport_pool_add = NULL; /* R_Port pool */ + vfree(rport_pool->pul_rpi_bitmap); /* R_Port bitmap */ + rport_pool->pul_rpi_bitmap = NULL; + } + UNF_REFERNCE_VAR(remain); +} + +static void unf_init_rscn_node(struct unf_port_id_page_s *v_port_id_page) +{ + UNF_CHECK_VALID(0x658, UNF_TRUE, v_port_id_page, return); + + v_port_id_page->uc_addr_format = 0; + v_port_id_page->uc_event_qualifier = 0; + v_port_id_page->uc_reserved = 0; + v_port_id_page->port_id_area = 0; + v_port_id_page->port_id_domain = 0; + v_port_id_page->port_id_port = 0; +} + +struct unf_port_id_page_s *unf_get_free_rscn_node(void *v_rscn_mg) +{ + /* Call by Save RSCN Port_ID */ + struct unf_rscn_mg_s *rscn_mgr = NULL; + struct unf_port_id_page_s *port_id_node = NULL; + struct list_head *list_node = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x659, UNF_TRUE, v_rscn_mg, return NULL); + rscn_mgr = (struct unf_rscn_mg_s *)v_rscn_mg; + + spin_lock_irqsave(&rscn_mgr->rscn_id_list_lock, flag); + if (list_empty(&rscn_mgr->list_free_rscn_page)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, + 
UNF_WARN, + "[warn]No RSCN node anymore"); + + spin_unlock_irqrestore(&rscn_mgr->rscn_id_list_lock, flag); + return NULL; + } + + /* Get from list_free_RSCN_page */ + list_node = (&rscn_mgr->list_free_rscn_page)->next; + list_del(list_node); + rscn_mgr->free_rscn_count--; + port_id_node = list_entry(list_node, struct unf_port_id_page_s, + list_node_rscn); + unf_init_rscn_node(port_id_node); + spin_unlock_irqrestore(&rscn_mgr->rscn_id_list_lock, flag); + + return port_id_node; +} + +static void unf_release_rscn_node(void *v_rscn_mg, + void *v_port_id_node) +{ + /* Call by RSCN GID_ACC */ + struct unf_rscn_mg_s *rscn_mgr = NULL; + struct unf_port_id_page_s *port_id_node = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x660, UNF_TRUE, v_rscn_mg, return); + UNF_CHECK_VALID(0x661, UNF_TRUE, v_port_id_node, return); + rscn_mgr = (struct unf_rscn_mg_s *)v_rscn_mg; + port_id_node = (struct unf_port_id_page_s *)v_port_id_node; + + /* Back to list_free_RSCN_page */ + spin_lock_irqsave(&rscn_mgr->rscn_id_list_lock, flag); + rscn_mgr->free_rscn_count++; + unf_init_rscn_node(port_id_node); + list_add_tail(&port_id_node->list_node_rscn, + &rscn_mgr->list_free_rscn_page); + spin_unlock_irqrestore(&rscn_mgr->rscn_id_list_lock, flag); +} + +static unsigned int unf_init_rscn_pool(struct unf_lport_s *v_lport) +{ + struct unf_rscn_mg_s *rscn_mgr = NULL; + struct unf_port_id_page_s *port_id_page = NULL; + unsigned int ret = RETURN_OK; + unsigned int i = 0; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x662, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + rscn_mgr = &v_lport->disc.rscn_mgr; + + /* Get RSCN Pool buffer */ + rscn_mgr->rscn_pool_add = + vmalloc(UNF_LIST_RSCN_PAGE_CNT * + sizeof(struct unf_port_id_page_s)); + if (!rscn_mgr->rscn_pool_add) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Port(0x%x) allocate RSCN pool failed", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + memset(rscn_mgr->rscn_pool_add, 0, + sizeof(struct unf_port_id_page_s) * UNF_LIST_RSCN_PAGE_CNT); + + spin_lock_irqsave(&rscn_mgr->rscn_id_list_lock, flag); + port_id_page = (struct unf_port_id_page_s *)(rscn_mgr->rscn_pool_add); + for (i = 0; i < UNF_LIST_RSCN_PAGE_CNT; i++) { + /* Add tail to list_free_RSCN_page */ + list_add_tail(&port_id_page->list_node_rscn, + &rscn_mgr->list_free_rscn_page); + + rscn_mgr->free_rscn_count++; + port_id_page++; + } + spin_unlock_irqrestore(&rscn_mgr->rscn_id_list_lock, flag); + + return ret; +} + +static void unf_free_rscn_pool(struct unf_lport_s *v_lport) +{ + struct unf_disc_s *disc = NULL; + + UNF_CHECK_VALID(0x663, UNF_TRUE, v_lport, return); + + disc = &v_lport->disc; + if (disc->rscn_mgr.rscn_pool_add) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_INFO, + "[info]Port(0x%x) free RSCN pool", + v_lport->nport_id); + + vfree(disc->rscn_mgr.rscn_pool_add); + disc->rscn_mgr.rscn_pool_add = NULL; + } +} + +static unsigned int unf_init_rscn_mgr(struct unf_lport_s *v_lport) +{ + struct unf_rscn_mg_s *rscn_mgr = NULL; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x664, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + rscn_mgr = &v_lport->disc.rscn_mgr; + + /* free RSCN page list */ + INIT_LIST_HEAD(&rscn_mgr->list_free_rscn_page); + /* busy RSCN page list */ + INIT_LIST_HEAD(&rscn_mgr->list_using_rscn_page); + spin_lock_init(&rscn_mgr->rscn_id_list_lock); + rscn_mgr->free_rscn_count = 0; + rscn_mgr->pfn_unf_get_free_rscn_node = unf_get_free_rscn_node; + rscn_mgr->pfn_unf_release_rscn_node = unf_release_rscn_node; + + ret = 
unf_init_rscn_pool(v_lport); + return ret; +} + +static void unf_destroy_rscn_mgr(struct unf_lport_s *v_lport) +{ + struct unf_rscn_mg_s *rscn_mgr = NULL; + + UNF_CHECK_VALID(0x665, UNF_TRUE, v_lport, return); + rscn_mgr = &v_lport->disc.rscn_mgr; + + rscn_mgr->free_rscn_count = 0; + rscn_mgr->pfn_unf_get_free_rscn_node = NULL; + rscn_mgr->pfn_unf_release_rscn_node = NULL; + + unf_free_rscn_pool(v_lport); +} + +static unsigned int unf_init_disc_rport_pool(struct unf_lport_s *v_lport) +{ + struct unf_disc_rport_mg_s *disc_mgr = NULL; + struct unf_disc_rport_s *disc_rport = NULL; + unsigned int i = 0; + unsigned int max_login = 0; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x662, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + max_login = v_lport->low_level_func.lport_cfg_items.max_login; + disc_mgr = &v_lport->disc.disc_rport_mgr; + + /* Alloc R_Port Disc Pool buffer (address) */ + disc_mgr->disc_pool_add = vmalloc(max_login * + sizeof(struct unf_disc_rport_s)); + if (!disc_mgr->disc_pool_add) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Port(0x%x) allocate disc RPort pool failed", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + memset(disc_mgr->disc_pool_add, 0, + (max_login * sizeof(struct unf_disc_rport_s))); + + /* Add R_Port to (free) DISC R_Port Pool */ + spin_lock_irqsave(&v_lport->disc.rport_busy_pool_lock, flag); + disc_rport = (struct unf_disc_rport_s *)(disc_mgr->disc_pool_add); + for (i = 0; i < max_login; i++) { + /* Add tail to list_disc_Rport_pool */ + list_add_tail(&disc_rport->entry_rport, + &disc_mgr->list_disc_rports_pool); + + disc_rport++; + } + spin_unlock_irqrestore(&v_lport->disc.rport_busy_pool_lock, flag); + + return RETURN_OK; +} + +static void unf_free_disc_rport_pool(struct unf_lport_s *v_lport) +{ + struct unf_disc_s *disc = NULL; + + UNF_CHECK_VALID(0x663, UNF_TRUE, v_lport, return); + + disc = &v_lport->disc; + if (disc->disc_rport_mgr.disc_pool_add) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_INFO, + "[info]Port(0x%x) free disc RPort pool", + v_lport->port_id); + + vfree(disc->disc_rport_mgr.disc_pool_add); + disc->disc_rport_mgr.disc_pool_add = NULL; + } +} + +static int unf_discover_port_info(void *v_arg_in) +{ + struct unf_disc_gs_event_info *gs_info = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + + UNF_CHECK_VALID(0x2250, UNF_TRUE, v_arg_in, return UNF_RETURN_ERROR); + + gs_info = (struct unf_disc_gs_event_info *)v_arg_in; + lport = (struct unf_lport_s *)gs_info->lport; + rport = (struct unf_rport_s *)gs_info->rport; + + switch (gs_info->entype) { + case UNF_DISC_GET_PORT_NAME: + ret = unf_send_gpn_id(lport, rport, gs_info->rport_id); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) send GPN_ID failed RPort(0x%x)", + lport->nport_id, gs_info->rport_id); + unf_rcv_gpn_id_rsp_unknown(lport, gs_info->rport_id); + } + break; + case UNF_DISC_GET_FEATURE: + ret = unf_send_gff_id(lport, rport, gs_info->rport_id); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) send GFF_ID failed to get RPort(0x%x)'s feature", + lport->port_id, gs_info->rport_id); + + unf_rcv_gff_id_rsp_unknown(lport, gs_info->rport_id); + } + break; + case UNF_DISC_GET_NODE_NAME: + ret = unf_send_gnn_id(lport, rport, gs_info->rport_id); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + 
"[warn]Port(0x%x) GNN_ID send failed with NPort ID(0x%x)", + lport->port_id, gs_info->rport_id); + + /* NOTE: Continue to next stage */ + unf_rcv_gnn_id_rsp_unknown(lport, rport, + gs_info->rport_id); + } + break; + default: + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[err]Send GS packet type(0x%x) is unknown", + gs_info->entype); + } + + kfree(gs_info); + + return (int)ret; +} + +unsigned int unf_get_and_post_disc_event(void *v_lport, + void *v_sns_port, + unsigned int v_nport_id, + enum unf_disc_type_e v_en_type) +{ + struct unf_disc_gs_event_info *gs_info = NULL; + unsigned long flag = 0; + struct unf_lport_s *root_lport = NULL; + struct unf_lport_s *lport = NULL; + struct unf_disc_manage_info_s *disc_info = NULL; + + UNF_CHECK_VALID(0x654, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x654, UNF_TRUE, v_sns_port, return UNF_RETURN_ERROR); + + lport = (struct unf_lport_s *)v_lport; + + if (lport->link_up == UNF_PORT_LINK_DOWN) + return RETURN_OK; + + root_lport = lport->root_lport; + disc_info = &root_lport->disc.disc_thread_info; + + if (disc_info->b_thread_exit == UNF_TRUE) + return RETURN_OK; + + gs_info = kmalloc(sizeof(struct unf_disc_gs_event_info), GFP_ATOMIC); + if (!gs_info) + return UNF_RETURN_ERROR; + + gs_info->entype = v_en_type; + gs_info->lport = v_lport; + gs_info->rport = v_sns_port; + gs_info->rport_id = v_nport_id; + + INIT_LIST_HEAD(&gs_info->list_entry); + + spin_lock_irqsave(&disc_info->disc_event_list_lock, flag); + list_add_tail(&gs_info->list_entry, &disc_info->list_head); + spin_unlock_irqrestore(&disc_info->disc_event_list_lock, flag); + wake_up_process(disc_info->data_thread); + return RETURN_OK; +} + +static int unf_disc_event_process(void *v_arg) +{ + struct list_head *node = NULL; + struct unf_disc_gs_event_info *gs_info = NULL; + unsigned long flags = 0; + struct unf_disc_s *disc = (struct unf_disc_s *)v_arg; + struct unf_disc_manage_info_s *disc_info = &disc->disc_thread_info; + + UNF_REFERNCE_VAR(v_arg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, + UNF_INFO, + "Port(0x%x) enter discovery thread.", + disc->lport->port_id); + + while (!kthread_should_stop()) { + if (disc_info->b_thread_exit == UNF_TRUE) + break; + + spin_lock_irqsave(&disc_info->disc_event_list_lock, flags); + if ((list_empty(&disc_info->list_head) == UNF_TRUE) || + (atomic_read(&disc_info->disc_contrl_size) == 0)) { + spin_unlock_irqrestore(&disc_info->disc_event_list_lock, + flags); + + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout((long)msecs_to_jiffies(1000)); + } else { + node = (&disc_info->list_head)->next; + list_del_init(node); + gs_info = list_entry(node, + struct unf_disc_gs_event_info, + list_entry); + spin_unlock_irqrestore(&disc_info->disc_event_list_lock, + flags); + unf_discover_port_info(gs_info); + } + } + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EVENT, + UNF_MAJOR, + "Port(0x%x) discovery thread over.", disc->lport->port_id); + + return RETURN_OK; +} + +void unf_flush_disc_event(void *v_disc, void *v_vport) +{ + struct unf_disc_s *disc = (struct unf_disc_s *)v_disc; + struct unf_disc_manage_info_s *disc_info = NULL; + struct list_head *list = NULL; + struct list_head *list_tmp = NULL; + struct unf_disc_gs_event_info *gs_info = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x2249, UNF_TRUE, v_disc, return); + + disc_info = &disc->disc_thread_info; + + spin_lock_irqsave(&disc_info->disc_event_list_lock, flag); + list_for_each_safe(list, list_tmp, &disc_info->list_head) { + gs_info = list_entry(list, struct 
unf_disc_gs_event_info, + list_entry); + + if (!v_vport || gs_info->lport == v_vport) { + list_del_init(&gs_info->list_entry); + kfree(gs_info); + } + } + + if (!v_vport) + atomic_set(&disc_info->disc_contrl_size, UNF_MAX_GS_SEND_NUM); + + spin_unlock_irqrestore(&disc_info->disc_event_list_lock, flag); +} + +void unf_disc_ctrl_size_inc(void *v_lport, unsigned int v_cmnd) +{ + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x2249, UNF_TRUE, v_lport, return); + lport = (struct unf_lport_s *)v_lport; + lport = lport->root_lport; + UNF_CHECK_VALID(0x2249, UNF_TRUE, lport, return); + + if (atomic_read(&lport->disc.disc_thread_info.disc_contrl_size) == + UNF_MAX_GS_SEND_NUM) + return; + + if (v_cmnd == NS_GPN_ID || v_cmnd == NS_GNN_ID || v_cmnd == NS_GFF_ID) + atomic_inc(&lport->disc.disc_thread_info.disc_contrl_size); +} + +static void unf_destroy_disc_thread(void *v_disc) +{ + struct unf_disc_manage_info_s *disc_info = NULL; + struct unf_disc_s *disc = (struct unf_disc_s *)v_disc; + + UNF_CHECK_VALID(0x2249, UNF_TRUE, disc, return); + + disc_info = &disc->disc_thread_info; + + disc_info->b_thread_exit = UNF_TRUE; + unf_flush_disc_event(disc, NULL); + + wake_up_process(disc_info->data_thread); + kthread_stop(disc_info->data_thread); + disc_info->data_thread = NULL; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Port(0x%x) destroy discovery thread succeed.", + disc->lport->port_id); +} + +static unsigned int unf_create_disc_thread(void *v_disc) +{ + struct unf_disc_manage_info_s *disc_info = NULL; + struct unf_disc_s *disc = (struct unf_disc_s *)v_disc; + + UNF_CHECK_VALID(0x2250, UNF_TRUE, disc, return UNF_RETURN_ERROR); + + /* If the thread cannot be found, apply for a new thread. */ + disc_info = &disc->disc_thread_info; + + memset(disc_info, 0, sizeof(struct unf_disc_manage_info_s)); + + INIT_LIST_HEAD(&disc_info->list_head); + spin_lock_init(&disc_info->disc_event_list_lock); + atomic_set(&disc_info->disc_contrl_size, UNF_MAX_GS_SEND_NUM); + + disc_info->b_thread_exit = UNF_FALSE; + disc_info->data_thread = + kthread_create(unf_disc_event_process, disc, + "%x_DiscT", disc->lport->port_id); + + if (IS_ERR(disc_info->data_thread) || !disc_info->data_thread) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) creat discovery thread(0x%p) unsuccessful.", + disc->lport->port_id, disc_info->data_thread); + + return UNF_RETURN_ERROR; + } + + wake_up_process(disc_info->data_thread); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "Port(0x%x) creat discovery thread succeed.", + disc->lport->port_id); + + return RETURN_OK; +} + +static void unf_disc_ref_cnt_dec(struct unf_disc_s *v_disc) +{ + unsigned long flags = 0; + + UNF_CHECK_VALID(0x669, UNF_TRUE, v_disc, return); + + spin_lock_irqsave(&v_disc->rport_busy_pool_lock, flags); + if (atomic_dec_and_test(&v_disc->disc_ref_cnt)) { + if (v_disc->disc_completion) + complete(v_disc->disc_completion); + } + spin_unlock_irqrestore(&v_disc->rport_busy_pool_lock, flags); +} + +static void unf_lport_disc_timeout(struct work_struct *v_work) +{ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_disc_s *disc = NULL; + enum unf_disc_state_e en_state = UNF_DISC_ST_END; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x675, UNF_TRUE, v_work, return); + + disc = container_of(v_work, struct unf_disc_s, disc_work.work); + if (!disc) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Get discover pointer failed"); + + return; + } + + lport 
= disc->lport; + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Find Port by discovery work failed"); + + unf_disc_ref_cnt_dec(disc); + return; + } + + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + en_state = disc->en_states; + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + /* 0xfffffc */ + rport = unf_get_rport_by_nport_id(lport, UNF_FC_FID_DIR_SERV); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) find fabric RPort failed", + lport->port_id); + + unf_disc_ref_cnt_dec(disc); + return; + } + + switch (en_state) { + case UNF_DISC_ST_START: + break; + + case UNF_DISC_ST_GIDPT_WAIT: + (void)unf_send_gid_pt(lport, rport); + break; + + case UNF_DISC_ST_GIDFT_WAIT: + (void)unf_send_gid_ft(lport, rport); + break; + + case UNF_DISC_ST_END: + break; + + default: + break; + } + + unf_disc_ref_cnt_dec(disc); +} + +unsigned int unf_init_disc_mgr(struct unf_lport_s *v_lport) +{ + struct unf_disc_s *disc = NULL; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x666, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + disc = &v_lport->disc; + disc->max_retry_count = UNF_DISC_RETRY_TIMES; + disc->retry_count = 0; + disc->disc_flag = UNF_DISC_NONE; + INIT_LIST_HEAD(&disc->list_busy_rports); /* busy RPort pool list */ + /* delete RPort pool list */ + INIT_LIST_HEAD(&disc->list_delete_rports); + /* destroy RPort pool list */ + INIT_LIST_HEAD(&disc->list_destroy_rports); + spin_lock_init(&disc->rport_busy_pool_lock); + + disc->disc_rport_mgr.disc_pool_add = NULL; + /* free disc RPort pool */ + INIT_LIST_HEAD(&disc->disc_rport_mgr.list_disc_rports_pool); + /* busy disc RPort pool */ + INIT_LIST_HEAD(&disc->disc_rport_mgr.list_disc_rport_busy); + + disc->disc_completion = NULL; + disc->lport = v_lport; + INIT_DELAYED_WORK(&disc->disc_work, unf_lport_disc_timeout); + disc->unf_disc_temp.pfn_unf_disc_start = unf_disc_start; + disc->unf_disc_temp.pfn_unf_disc_stop = unf_disc_stop; + disc->unf_disc_temp.pfn_unf_disc_callback = unf_disc_callback; + atomic_set(&disc->disc_ref_cnt, 0); + + /* Init RSCN Manager */ + ret = unf_init_rscn_mgr(v_lport); + if (ret != RETURN_OK) + return UNF_RETURN_ERROR; + + if (v_lport != v_lport->root_lport) + return ret; + + ret = unf_create_disc_thread(disc); + if (ret != RETURN_OK) { + unf_destroy_rscn_mgr(v_lport); + + return UNF_RETURN_ERROR; + } + + /* Init R_Port free Pool */ + ret = unf_init_rport_pool(v_lport); + if (ret != RETURN_OK) { + unf_destroy_disc_thread(disc); + unf_destroy_rscn_mgr(v_lport); + + return UNF_RETURN_ERROR; + } + + /* Init R_Port free disc Pool */ + ret = unf_init_disc_rport_pool(v_lport); + if (ret != RETURN_OK) { + unf_destroy_disc_thread(disc); + unf_free_rport_pool(v_lport); + unf_destroy_rscn_mgr(v_lport); + + return UNF_RETURN_ERROR; + } + + return ret; +} + +static void unf_wait_disc_complete(struct unf_lport_s *v_lport) +{ + struct unf_disc_s *disc = NULL; + int wait = UNF_FALSE; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + unsigned long long time_out = 0; + + struct completion disc_completion = + COMPLETION_INITIALIZER(disc_completion); + + disc = &v_lport->disc; + + UNF_DELAYED_WORK_SYNC(ret, v_lport->port_id, &disc->disc_work, + "Disc_work"); + if (ret == RETURN_OK) + unf_disc_ref_cnt_dec(disc); + + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + if (atomic_read(&disc->disc_ref_cnt) != 0) { + disc->disc_completion = &disc_completion; + wait = UNF_TRUE; + } + 
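/* Drop the pool lock before sleeping; unf_disc_ref_cnt_dec() signals disc_completion once the last discovery reference is released. */ +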
spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + if (wait == UNF_TRUE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) begin to wait for discover completion(0x%lx)", + v_lport->port_id, jiffies); + + time_out = wait_for_completion_timeout( + disc->disc_completion, + msecs_to_jiffies(UNF_OS_REMOVE_CARD_TIMEOUT)); + if (time_out == 0) + unf_cmmark_dirty_mem(v_lport, + UNF_LPORT_DIRTY_FLAG_DISC_DIRTY); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) wait for discover completion end(0x%lx)", + v_lport->port_id, jiffies); + + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + disc->disc_completion = NULL; + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + } +} + +void unf_disc_mgr_destroy(void *v_lport) +{ + struct unf_disc_s *disc = NULL; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x672, UNF_TRUE, v_lport, return); + lport = (struct unf_lport_s *)v_lport; + + disc = &lport->disc; + disc->retry_count = 0; + disc->unf_disc_temp.pfn_unf_disc_start = NULL; + disc->unf_disc_temp.pfn_unf_disc_stop = NULL; + disc->unf_disc_temp.pfn_unf_disc_callback = NULL; + + unf_free_disc_rport_pool(lport); + unf_destroy_rscn_mgr(lport); + unf_wait_disc_complete(lport); + + if (lport != lport->root_lport) + return; + + unf_destroy_disc_thread(disc); + unf_free_rport_pool(lport); + lport->destroy_step = UNF_LPORT_DESTROY_STEP_6_DESTROY_DISC_MGR; +} + +void unf_disc_error_recovery(void *v_lport) +{ + struct unf_rport_s *rport = NULL; + struct unf_disc_s *disc = NULL; + unsigned long delay = 0; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x673, UNF_TRUE, v_lport, return); + + lport = (struct unf_lport_s *)v_lport; + disc = &lport->disc; + + rport = unf_get_rport_by_nport_id(lport, UNF_FC_FID_DIR_SERV); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) find RPort failed", + lport->port_id); + return; + } + + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + + /* Delay work is pending */ + if (delayed_work_pending(&disc->disc_work)) { + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) disc_work is running and do nothing", + lport->port_id); + return; + } + + /* Continue to retry */ + if (disc->retry_count < disc->max_retry_count) { + disc->retry_count++; + delay = (unsigned long)lport->ed_tov; + + if (queue_delayed_work(unf_work_queue, &disc->disc_work, + (unsigned long)msecs_to_jiffies( + (unsigned int)delay))) { + atomic_inc(&disc->disc_ref_cnt); + } + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + } else { + /* Go to next stage */ + if (disc->en_states == UNF_DISC_ST_GIDPT_WAIT) { + /* GID_PT_WAIT --->>> Send GID_FT */ + unf_disc_state_ma(lport, UNF_EVENT_DISC_RETRY_TIMEOUT); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, + flag); + + while ((ret != RETURN_OK) && + (disc->retry_count < disc->max_retry_count)) { + ret = unf_send_gid_ft(lport, rport); + disc->retry_count++; + } + } else if (disc->en_states == UNF_DISC_ST_GIDFT_WAIT) { + /* GID_FT_WAIT --->>> Send LOGO */ + unf_disc_state_ma(lport, UNF_EVENT_DISC_RETRY_TIMEOUT); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, + flag); + } else { + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, + flag); + } + } +} + +enum unf_disc_state_e unf_disc_stat_start(enum unf_disc_state_e v_old_state, 
+ enum unf_disc_event_e v_en_event) +{ + enum unf_disc_state_e en_next_state = UNF_DISC_ST_END; + + if (v_en_event == UNF_EVENT_DISC_NORMAL_ENTER) + en_next_state = UNF_DISC_ST_GIDPT_WAIT; + else + en_next_state = v_old_state; + + return en_next_state; +} + +enum unf_disc_state_e unf_disc_stat_gid_pt_wait( + enum unf_disc_state_e v_old_state, + enum unf_disc_event_e v_en_event) +{ + enum unf_disc_state_e en_next_state = UNF_DISC_ST_END; + + switch (v_en_event) { + case UNF_EVENT_DISC_FAILED: + en_next_state = UNF_DISC_ST_GIDPT_WAIT; + break; + + case UNF_EVENT_DISC_RETRY_TIMEOUT: + en_next_state = UNF_DISC_ST_GIDFT_WAIT; + break; + + case UNF_EVENT_DISC_SUCCESS: + en_next_state = UNF_DISC_ST_END; + break; + + case UNF_EVENT_DISC_LINKDOWN: + en_next_state = UNF_DISC_ST_START; + break; + + default: + en_next_state = v_old_state; + break; + } + + return en_next_state; +} + +enum unf_disc_state_e unf_disc_stat_gid_ft_wait( + enum unf_disc_state_e v_old_state, + enum unf_disc_event_e v_en_event) +{ + enum unf_disc_state_e en_next_state = UNF_DISC_ST_END; + + switch (v_en_event) { + case UNF_EVENT_DISC_FAILED: + en_next_state = UNF_DISC_ST_GIDFT_WAIT; + break; + + case UNF_EVENT_DISC_RETRY_TIMEOUT: + en_next_state = UNF_DISC_ST_END; + break; + + case UNF_EVENT_DISC_SUCCESS: + en_next_state = UNF_DISC_ST_END; + break; + + case UNF_EVENT_DISC_LINKDOWN: + en_next_state = UNF_DISC_ST_START; + break; + + default: + en_next_state = v_old_state; + break; + } + + return en_next_state; +} + +enum unf_disc_state_e unf_disc_stat_end(enum unf_disc_state_e v_old_state, + enum unf_disc_event_e v_en_event) +{ + enum unf_disc_state_e en_next_state = UNF_DISC_ST_END; + + if (v_en_event == UNF_EVENT_DISC_LINKDOWN) + en_next_state = UNF_DISC_ST_START; + else + en_next_state = v_old_state; + + return en_next_state; +} + +void unf_disc_state_ma(struct unf_lport_s *v_lport, + enum unf_disc_event_e v_en_event) +{ + struct unf_disc_s *disc = NULL; + enum unf_disc_state_e en_old_state = UNF_DISC_ST_START; + enum unf_disc_state_e en_next_state = UNF_DISC_ST_START; + + UNF_CHECK_VALID(0x674, UNF_TRUE, v_lport, return); + + disc = &v_lport->disc; + en_old_state = disc->en_states; + + switch (disc->en_states) { + case UNF_DISC_ST_START: + en_next_state = unf_disc_stat_start(en_old_state, v_en_event); + break; + + case UNF_DISC_ST_GIDPT_WAIT: + en_next_state = unf_disc_stat_gid_pt_wait(en_old_state, + v_en_event); + break; + + case UNF_DISC_ST_GIDFT_WAIT: + en_next_state = unf_disc_stat_gid_ft_wait(en_old_state, + v_en_event); + break; + + case UNF_DISC_ST_END: + en_next_state = unf_disc_stat_end(en_old_state, v_en_event); + break; + + default: + en_next_state = en_old_state; + break; + } + + unf_set_disc_state(disc, en_next_state); +} diff --git a/drivers/scsi/huawei/hifc/unf_disc.h b/drivers/scsi/huawei/hifc/unf_disc.h new file mode 100644 index 000000000000..45fc3e011974 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_disc.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __UNF_DISC_H__ +#define __UNF_DISC_H__ + +#define UNF_DISC_RETRY_TIMES 3 +#define UNF_DISC_NONE 0 +#define UNF_DISC_FABRIC 1 +#define UNF_DISC_LOOP 2 + +enum unf_disc_state_e { + UNF_DISC_ST_START = 0x3000, + UNF_DISC_ST_GIDPT_WAIT, + UNF_DISC_ST_GIDFT_WAIT, + UNF_DISC_ST_END +}; + +enum unf_disc_event_e { + UNF_EVENT_DISC_NORMAL_ENTER = 0x8000, + UNF_EVENT_DISC_FAILED = 0x8001, + UNF_EVENT_DISC_SUCCESS = 0x8002, + 
UNF_EVENT_DISC_RETRY_TIMEOUT = 0x8003, + UNF_EVENT_DISC_LINKDOWN = 0x8004 +}; + +enum unf_disc_type_e { + UNF_DISC_GET_PORT_NAME = 0, + UNF_DISC_GET_NODE_NAME, + UNF_DISC_GET_FEATURE +}; + +struct unf_disc_gs_event_info { + void *lport; + void *rport; + unsigned int rport_id; + enum unf_disc_type_e entype; + struct list_head list_entry; +}; + +unsigned int unf_get_and_post_disc_event(void *v_lport, + void *v_sns_port, + unsigned int v_nport_id, + enum unf_disc_type_e v_en_type); + +void unf_flush_disc_event(void *v_disc, void *v_vport); +void unf_disc_error_recovery(void *v_lport); +void unf_disc_mgr_destroy(void *v_lport); +void unf_disc_ctrl_size_inc(void *v_lport, unsigned int v_cmnd); + +#endif diff --git a/drivers/scsi/huawei/hifc/unf_event.c b/drivers/scsi/huawei/hifc/unf_event.c new file mode 100644 index 000000000000..205824b633a4 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_event.c @@ -0,0 +1,557 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#include "unf_log.h" +#include "unf_common.h" +#include "unf_event.h" +#include "unf_lport.h" + +struct unf_event_list fc_event_list; +struct unf_global_event_queue global_event_queue; + +/* Max global event node */ +#define UNF_MAX_GLOBAL_ENENT_NODE 24 + +unsigned int unf_init_event_msg(struct unf_lport_s *v_lport) +{ + struct unf_event_mgr *event_mgr = NULL; + struct unf_cm_event_report *event_node = NULL; + unsigned int i; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x770, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + event_mgr = &v_lport->event_mgr; + + /* Get and Initial Event Node resource */ + event_mgr->pmem_add = + vmalloc((size_t)event_mgr->free_event_count * + sizeof(struct unf_cm_event_report)); + if (!event_mgr->pmem_add) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Port(0x%x) allocate event manager failed", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + memset(event_mgr->pmem_add, 0, + ((size_t)event_mgr->free_event_count * + sizeof(struct unf_cm_event_report))); + + event_node = (struct unf_cm_event_report *)(event_mgr->pmem_add); + + spin_lock_irqsave(&event_mgr->port_event_lock, flags); + for (i = 0; i < event_mgr->free_event_count; i++) { + INIT_LIST_HEAD(&event_node->list_entry); + list_add_tail(&event_node->list_entry, + &event_mgr->list_free_event); + event_node++; + } + spin_unlock_irqrestore(&event_mgr->port_event_lock, flags); + + return RETURN_OK; +} + +static void unf_del_eventcenter(struct unf_lport_s *v_lport) +{ + struct unf_event_mgr *event_mgr = NULL; + + UNF_CHECK_VALID(0x771, UNF_TRUE, v_lport, return); + + event_mgr = &v_lport->event_mgr; + event_mgr->pfn_unf_get_free_event = NULL; + event_mgr->pfn_unf_release_event = NULL; + event_mgr->pfn_unf_post_event = NULL; +} + +void unf_init_event_node(struct unf_cm_event_report *v_event_node) +{ + UNF_CHECK_VALID(0x776, UNF_TRUE, v_event_node, return); + + v_event_node->event = UNF_EVENT_TYPE_REQUIRE; + v_event_node->event_asy_flag = UNF_EVENT_ASYN; + v_event_node->delay_times = 0; + v_event_node->para_in = NULL; + v_event_node->para_out = NULL; + v_event_node->result = 0; + v_event_node->lport = NULL; + v_event_node->pfn_unf_event_task = NULL; + v_event_node->pfn_unf_event_recovery_strategy = NULL; + v_event_node->pfn_unf_event_alarm_strategy = NULL; +} + +struct unf_cm_event_report *unf_get_free_event_node(void *v_lport) +{ + struct unf_event_mgr *event_mgr = NULL; + struct unf_cm_event_report *event_node = NULL; + struct 
list_head *list_node = NULL; + struct unf_lport_s *root_lport = NULL; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x777, UNF_TRUE, v_lport, return NULL); + root_lport = (struct unf_lport_s *)v_lport; + root_lport = root_lport->root_lport; + + if (unlikely(atomic_read(&root_lport->port_no_operater_flag) == + UNF_LPORT_NOP)) + return NULL; + + /* Get EventMgr from Lport */ + event_mgr = &root_lport->event_mgr; + + /* Get free node free pool */ + spin_lock_irqsave(&event_mgr->port_event_lock, flags); + if (list_empty(&event_mgr->list_free_event)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Port(0x%x) have no event node anymore", + root_lport->port_id); + + spin_unlock_irqrestore(&event_mgr->port_event_lock, flags); + return NULL; + } + + list_node = (&event_mgr->list_free_event)->next; + list_del(list_node); + event_mgr->free_event_count--; + event_node = list_entry(list_node, struct unf_cm_event_report, + list_entry); + + /* Initial event node */ + unf_init_event_node(event_node); + spin_unlock_irqrestore(&event_mgr->port_event_lock, flags); + + return event_node; +} + +void unf_check_event_mgr_status(struct unf_event_mgr *v_event_mgr) +{ + unsigned long flag = 0; + + UNF_CHECK_VALID(0x773, UNF_TRUE, v_event_mgr, return); + + spin_lock_irqsave(&v_event_mgr->port_event_lock, flag); + if ((v_event_mgr->emg_completion) && + (v_event_mgr->free_event_count == UNF_MAX_EVENT_NODE)) { + complete(v_event_mgr->emg_completion); + } + spin_unlock_irqrestore(&v_event_mgr->port_event_lock, flag); +} + +void unf_release_event(void *v_lport, void *v_event_node) +{ + struct unf_event_mgr *event_mgr = NULL; + struct unf_lport_s *root_lport = NULL; + struct unf_cm_event_report *event_node = NULL; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x778, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x779, UNF_TRUE, v_event_node, return); + + event_node = (struct unf_cm_event_report *)v_event_node; + root_lport = (struct unf_lport_s *)v_lport; + root_lport = root_lport->root_lport; + event_mgr = &root_lport->event_mgr; + + spin_lock_irqsave(&event_mgr->port_event_lock, flags); + event_mgr->free_event_count++; + unf_init_event_node(event_node); + list_add_tail(&event_node->list_entry, &event_mgr->list_free_event); + spin_unlock_irqrestore(&event_mgr->port_event_lock, flags); + + unf_check_event_mgr_status(event_mgr); +} + +void unf_post_event(void *v_lport, void *v_event_node) +{ + struct unf_cm_event_report *event_node = NULL; + struct unf_chip_manage_info_s *card_thread_info = NULL; + struct unf_lport_s *root_lport = NULL; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x780, UNF_TRUE, v_event_node, return); + event_node = (struct unf_cm_event_report *)v_event_node; + UNF_REFERNCE_VAR(v_lport); + + /* If null, post to global event center */ + if (!v_lport) { + spin_lock_irqsave(&fc_event_list.fc_eventlist_lock, flags); + fc_event_list.list_num++; + list_add_tail(&event_node->list_entry, + &fc_event_list.list_head); + spin_unlock_irqrestore(&fc_event_list.fc_eventlist_lock, + flags); + + wake_up_process(event_thread); + } else { + root_lport = (struct unf_lport_s *)v_lport; + root_lport = root_lport->root_lport; + card_thread_info = root_lport->chip_info; + + /* Post to global event center */ + if (!card_thread_info) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EVENT, + UNF_WARN, + "[warn]Port(0x%x) has strange event with type(0x%x)", + root_lport->nport_id, event_node->event); + + spin_lock_irqsave(&fc_event_list.fc_eventlist_lock, + flags); + fc_event_list.list_num++; + 
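/* No dedicated chip thread for this port: queue the node on the global FC event list handled by the common event thread. */ +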
list_add_tail(&event_node->list_entry, + &fc_event_list.list_head); + spin_unlock_irqrestore( + &fc_event_list.fc_eventlist_lock, + flags); + + wake_up_process(event_thread); + } else { + spin_lock_irqsave( + &card_thread_info->chip_event_list_lock, + flags); + card_thread_info->list_num++; + list_add_tail(&event_node->list_entry, + &card_thread_info->list_head); + spin_unlock_irqrestore( + &card_thread_info->chip_event_list_lock, + flags); + + wake_up_process(card_thread_info->data_thread); + } + } +} + +unsigned int unf_init_event_center(void *v_lport) +{ + struct unf_event_mgr *event_mgr = NULL; + unsigned int ret = RETURN_OK; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x772, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + lport = (struct unf_lport_s *)v_lport; + + /* Initial Disc manager */ + event_mgr = &lport->event_mgr; + event_mgr->free_event_count = UNF_MAX_EVENT_NODE; + event_mgr->pfn_unf_get_free_event = unf_get_free_event_node; + event_mgr->pfn_unf_release_event = unf_release_event; + event_mgr->pfn_unf_post_event = unf_post_event; + + INIT_LIST_HEAD(&event_mgr->list_free_event); + spin_lock_init(&event_mgr->port_event_lock); + event_mgr->emg_completion = NULL; + + ret = unf_init_event_msg(lport); + return ret; +} + +void unf_wait_event_mgr_complete(struct unf_event_mgr *v_event_mgr) +{ + struct unf_event_mgr *event_mgr = NULL; + int wait = UNF_FALSE; + unsigned long mg_flag = 0; + + struct completion fc_event_completion = + COMPLETION_INITIALIZER(fc_event_completion); + + UNF_CHECK_VALID(0x774, UNF_TRUE, v_event_mgr, return); + event_mgr = v_event_mgr; + + spin_lock_irqsave(&event_mgr->port_event_lock, mg_flag); + if (event_mgr->free_event_count != UNF_MAX_EVENT_NODE) { + event_mgr->emg_completion = &fc_event_completion; + wait = UNF_TRUE; + } + spin_unlock_irqrestore(&event_mgr->port_event_lock, mg_flag); + + if (wait == UNF_TRUE) + wait_for_completion(event_mgr->emg_completion); + + spin_lock_irqsave(&event_mgr->port_event_lock, mg_flag); + event_mgr->emg_completion = NULL; + spin_unlock_irqrestore(&event_mgr->port_event_lock, mg_flag); +} + +unsigned int unf_event_center_destroy(void *v_lport) +{ + struct unf_event_mgr *event_mgr = NULL; + struct list_head *list = NULL; + struct list_head *list_tmp = NULL; + struct unf_cm_event_report *event_node = NULL; + unsigned int ret = RETURN_OK; + unsigned long flag = 0; + unsigned long list_lock_flag = 0; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x775, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + lport = (struct unf_lport_s *)v_lport; + event_mgr = &lport->event_mgr; + + spin_lock_irqsave(&fc_event_list.fc_eventlist_lock, list_lock_flag); + if (!list_empty(&fc_event_list.list_head)) { + list_for_each_safe(list, list_tmp, &fc_event_list.list_head) { + event_node = list_entry(list, + struct unf_cm_event_report, + list_entry); + if (lport == event_node->lport) { + list_del_init(&event_node->list_entry); + if (event_node->event_asy_flag == + UNF_EVENT_SYN) { + event_node->result = UNF_RETURN_ERROR; + complete(&event_node->event_comp); + } + + spin_lock_irqsave(&event_mgr->port_event_lock, + flag); + event_mgr->free_event_count++; + list_add_tail(&event_node->list_entry, + &event_mgr->list_free_event); + spin_unlock_irqrestore( + &event_mgr->port_event_lock, flag); + } + } + } + spin_unlock_irqrestore(&fc_event_list.fc_eventlist_lock, + list_lock_flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) begin to wait event", lport->port_id); + 
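/* Wait here until every event node borrowed from this port's manager has been returned to the free list. */ +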
unf_wait_event_mgr_complete(event_mgr); + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) wait event process end", lport->port_id); + + unf_del_eventcenter(lport); + vfree(event_mgr->pmem_add); + event_mgr->pmem_add = NULL; + lport->destroy_step = UNF_LPORT_DESTROY_STEP_3_DESTROY_EVENT_CENTER; + + return ret; +} + +static void unf_procee_asyn_event(struct unf_cm_event_report *v_event_node) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct unf_lport_s *lport = (struct unf_lport_s *)v_event_node->lport; + + UNF_CHECK_VALID(0x782, UNF_TRUE, lport, return); + if (v_event_node->pfn_unf_event_task) + ret = (unsigned int) + v_event_node->pfn_unf_event_task(v_event_node->para_in, + v_event_node->para_out); + + if (lport->event_mgr.pfn_unf_release_event) + lport->event_mgr.pfn_unf_release_event(lport, v_event_node); + + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EVENT, UNF_WARN, + "[warn]Port(0x%x) handle event(0x%x) failed", + lport->port_id, v_event_node->event); + } + + UNF_REFERNCE_VAR(ret); +} + +void unf_release_global_event(void *v_event_node) +{ + unsigned long flag = 0; + struct unf_cm_event_report *event_node = NULL; + + UNF_CHECK_VALID(0x784, UNF_TRUE, v_event_node, return); + event_node = (struct unf_cm_event_report *)v_event_node; + unf_init_event_node(event_node); + + spin_lock_irqsave(&global_event_queue.global_eventlist_lock, flag); + global_event_queue.list_number++; + list_add_tail(&event_node->list_entry, + &global_event_queue.global_eventlist); + spin_unlock_irqrestore(&global_event_queue.global_eventlist_lock, + flag); +} + +void unf_handle_event(struct unf_cm_event_report *v_event_node) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int event = 0; + unsigned int event_asy_flag = UNF_EVENT_ASYN; + + UNF_CHECK_VALID(0x781, UNF_TRUE, v_event_node, return); + UNF_REFERNCE_VAR(ret); + UNF_REFERNCE_VAR(event); + + event = v_event_node->event; + event_asy_flag = v_event_node->event_asy_flag; + + switch (event_asy_flag) { + case UNF_EVENT_SYN: /* synchronous event node */ + case UNF_GLOBAL_EVENT_SYN: + if (v_event_node->pfn_unf_event_task) { + ret = (unsigned int)v_event_node->pfn_unf_event_task( + v_event_node->para_in, + v_event_node->para_out); + } + v_event_node->result = ret; + complete(&v_event_node->event_comp); + break; + case UNF_EVENT_ASYN: /* asynchronous event node */ + unf_procee_asyn_event(v_event_node); + break; + case UNF_GLOBAL_EVENT_ASYN: + if (v_event_node->pfn_unf_event_task) { + ret = (unsigned int)v_event_node->pfn_unf_event_task( + v_event_node->para_in, + v_event_node->para_out); + } + unf_release_global_event(v_event_node); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_EVENT, UNF_WARN, + "[warn]handle global event(0x%x) failed", + event); + } + break; + default: + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EVENT, UNF_WARN, + "[warn]Unknown event(0x%x)", event); + break; + } +} + +unsigned int unf_init_global_event_msg(void) +{ + struct unf_cm_event_report *event_node = NULL; + unsigned int ret = RETURN_OK; + unsigned int i = 0; + unsigned long flag = 0; + + INIT_LIST_HEAD(&global_event_queue.global_eventlist); + spin_lock_init(&global_event_queue.global_eventlist_lock); + global_event_queue.list_number = 0; + + global_event_queue.global_event_add = + vmalloc(UNF_MAX_GLOBAL_ENENT_NODE * + sizeof(struct unf_cm_event_report)); + if (!global_event_queue.global_event_add) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Can't allocate global event 
queue"); + + return UNF_RETURN_ERROR; + } + memset(global_event_queue.global_event_add, 0, + (sizeof(struct unf_cm_event_report) * + UNF_MAX_GLOBAL_ENENT_NODE)); + event_node = (struct unf_cm_event_report *) + (global_event_queue.global_event_add); + + spin_lock_irqsave(&global_event_queue.global_eventlist_lock, flag); + for (i = 0; i < UNF_MAX_GLOBAL_ENENT_NODE; i++) { + INIT_LIST_HEAD(&event_node->list_entry); + list_add_tail(&event_node->list_entry, + &global_event_queue.global_eventlist); + global_event_queue.list_number++; + event_node++; + } + spin_unlock_irqrestore(&global_event_queue.global_eventlist_lock, + flag); + + return ret; +} + +void unf_destroy_global_event_msg(void) +{ + if (global_event_queue.list_number != UNF_MAX_GLOBAL_ENENT_NODE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EVENT, UNF_CRITICAL, + "[warn]Global event release not complete with remain nodes(0x%x)", + global_event_queue.list_number); + } + + vfree(global_event_queue.global_event_add); +} + +unsigned int unf_schedule_global_event( + void *v_para, + unsigned int v_event_asy_flag, + int (*pfn_unf_event_task)(void *v_argin, void *v_argout)) +{ + struct list_head *list_node = NULL; + struct unf_cm_event_report *event_node = NULL; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x783, UNF_TRUE, pfn_unf_event_task, + return UNF_RETURN_ERROR); + + if ((v_event_asy_flag != UNF_GLOBAL_EVENT_ASYN) && + (v_event_asy_flag != UNF_GLOBAL_EVENT_SYN)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Event async flag(0x%x) abnormity", + v_event_asy_flag); + + return UNF_RETURN_ERROR; + } + + spin_lock_irqsave(&global_event_queue.global_eventlist_lock, flag); + if (list_empty(&global_event_queue.global_eventlist)) { + spin_unlock_irqrestore( + &global_event_queue.global_eventlist_lock, flag); + + return UNF_RETURN_ERROR; + } + + list_node = (&global_event_queue.global_eventlist)->next; + list_del_init(list_node); + global_event_queue.list_number--; + event_node = list_entry(list_node, struct unf_cm_event_report, + list_entry); + spin_unlock_irqrestore(&global_event_queue.global_eventlist_lock, + flag); + + /* Initial global event */ + unf_init_event_node(event_node); + init_completion(&event_node->event_comp); + event_node->event_asy_flag = v_event_asy_flag; + event_node->pfn_unf_event_task = pfn_unf_event_task; + event_node->para_in = (void *)v_para; + event_node->para_out = NULL; + + unf_post_event(NULL, event_node); + + if (v_event_asy_flag == UNF_GLOBAL_EVENT_SYN) { + /* must wait for complete */ + wait_for_completion(&event_node->event_comp); + ret = event_node->result; + unf_release_global_event(event_node); + } else { + ret = RETURN_OK; + } + + return ret; +} + +struct unf_cm_event_report *unf_get_one_event_node(void *v_lport) +{ + struct unf_lport_s *lport = (struct unf_lport_s *)v_lport; + + UNF_CHECK_VALID(0x785, UNF_TRUE, v_lport, return NULL); + UNF_CHECK_VALID(0x786, UNF_TRUE, + lport->event_mgr.pfn_unf_get_free_event, + return NULL); + + return lport->event_mgr.pfn_unf_get_free_event((void *)lport); +} + +void unf_post_one_event_node(void *v_lport, + struct unf_cm_event_report *v_event) +{ + struct unf_lport_s *lport = (struct unf_lport_s *)v_lport; + + UNF_CHECK_VALID(0x787, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x788, UNF_TRUE, v_event, return); + + UNF_CHECK_VALID(0x789, UNF_TRUE, lport->event_mgr.pfn_unf_post_event, + return); + UNF_CHECK_VALID(0x790, UNF_TRUE, v_event, return); + + lport->event_mgr.pfn_unf_post_event((void *)lport, 
v_event); +} + diff --git a/drivers/scsi/huawei/hifc/unf_event.h b/drivers/scsi/huawei/hifc/unf_event.h new file mode 100644 index 000000000000..4f78d1c538b8 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_event.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __UNF_EVENT_H__ +#define __UNF_EVENT_H__ +#include "hifc_knl_adp.h" + +enum unf_poll_flag { + UNF_POLL_CHIPERROR_FLAG = 0, /* CHIP ERROR POLL */ + UNF_POLL_ERROR_CODE, /* CODE ERROR POLL */ + UNF_POLL_SFP_FLAG, /* SFP POLL */ + UNF_POLL_BUTT +}; + +#define UNF_MAX_EVENT_NODE 256 + +enum unf_event_type { + UNF_EVENT_TYPE_ALARM = 0, /* Alarm */ + UNF_EVENT_TYPE_REQUIRE, /* Require */ + UNF_EVENT_TYPE_RECOVERY, /* Recovery */ + UNF_EVENT_TYPE_BUTT +}; + +struct unf_cm_event_report { + /* event type */ + unsigned int event; + + /* ASY flag */ + unsigned int event_asy_flag; + + /* Delay times,must be async event */ + unsigned int delay_times; + + struct list_head list_entry; + + void *lport; + + /* parameter */ + void *para_in; + void *para_out; + unsigned int result; + + /* recovery strategy */ + int (*pfn_unf_event_task)(void *v_argin, void *v_argout); + + /* recovery strategy */ + int (*pfn_unf_event_recovery_strategy)(void *); + + /* alarm strategy */ + int (*pfn_unf_event_alarm_strategy)(void *); + + struct completion event_comp; +}; + +struct unf_event_mgr { + spinlock_t port_event_lock; + unsigned int free_event_count; + + struct list_head list_free_event; + + struct completion *emg_completion; + + void *pmem_add; + struct unf_cm_event_report *(*pfn_unf_get_free_event)(void *v_lport); + void (*pfn_unf_release_event)(void *v_lport, void *v_event_node); + void (*pfn_unf_post_event)(void *v_lport, void *v_event_node); +}; + +struct unf_global_event_queue { + void *global_event_add; + unsigned int list_number; + struct list_head global_eventlist; + spinlock_t global_eventlist_lock; +}; + +struct unf_event_list { + struct list_head list_head; + spinlock_t fc_eventlist_lock; + unsigned int list_num; /* list node number */ +}; + +void unf_handle_event(struct unf_cm_event_report *v_event_node); +unsigned int unf_init_global_event_msg(void); +void unf_destroy_global_event_msg(void); +unsigned int unf_schedule_global_event( + void *v_para, + unsigned int v_event_asy_flag, + int (*pfn_unf_event_task)(void *v_argin, void *v_argout)); + +struct unf_cm_event_report *unf_get_one_event_node(void *v_lport); +void unf_post_one_event_node(void *v_lport, + struct unf_cm_event_report *v_event); +unsigned int unf_event_center_destroy(void *v_lport); +unsigned int unf_init_event_center(void *v_lport); + +extern struct task_struct *event_thread; +extern struct unf_global_event_queue global_event_queue; +extern struct unf_event_list fc_event_list; +#endif diff --git a/drivers/scsi/huawei/hifc/unf_exchg.c b/drivers/scsi/huawei/hifc/unf_exchg.c new file mode 100644 index 000000000000..f3234a9edc22 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_exchg.c @@ -0,0 +1,3632 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#include "unf_log.h" +#include "unf_common.h" +#include "unf_exchg.h" +#include "unf_rport.h" +#include "unf_service.h" +#include "unf_io.h" + +#define UNF_DEL_XCHG_TIMER_SAFE(v_xchg) \ + do { \ + if (cancel_delayed_work(&((v_xchg)->timeout_work))) { \ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_IO_ATT, \ + UNF_MAJOR, \ + 
"Exchange(0x%p) is free, but timer is pending.", \ + v_xchg); \ + } else { \ + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_IO_ATT, \ + UNF_CRITICAL, \ + "Exchange(0x%p) is free, but timer is running.", \ + v_xchg); \ + } \ + } while (0) + +#define UNF_XCHG_IS_ELS_REPLY(v_xchg) \ + ((((v_xchg)->cmnd_code & 0x0ffff) == ELS_ACC) || \ + (((v_xchg)->cmnd_code & 0x0ffff) == ELS_RJT)) + +static struct unf_ioflow_id_s io_stage[] = { + { "XCHG_ALLOC" }, + { "TGT_RECEIVE_ABTS" }, + { "TGT_ABTS_DONE" }, + { "TGT_IO_SRR" }, + { "SFS_RESPONSE" }, + { "SFS_TIMEOUT" }, + { "INI_SEND_CMND" }, + { "INI_RESPONSE_DONE" }, + { "INI_EH_ABORT" }, + { "INI_EH_DEVICE_RESET" }, + { "INI_EH_BLS_DONE" }, + { "INI_IO_TIMEOUT" }, + { "INI_REQ_TIMEOUT" }, + { "XCHG_CANCEL_TIMER" }, + { "XCHG_FREE_XCHG" }, + { "SEND_ELS" }, + { "IO_XCHG_WAIT" }, +}; + +void unf_wakeup_scsi_task_cmnd(struct unf_lport_s *v_lport) +{ + struct list_head *node = NULL; + struct list_head *next_node = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned long hot_pool_lock_flags = 0; + unsigned long xchg_flag = 0; + struct unf_xchg_mgr_s *xchg_mgr = NULL; + unsigned int i = 0; + + UNF_CHECK_VALID(0x850, UNF_TRUE, v_lport, return); + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + xchg_mgr = unf_get_xchg_mgr_by_lport(v_lport, i); + + if (!xchg_mgr) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_EVENT, UNF_MINOR, + "Can't find LPort(0x%x) MgrIdx %u exchange manager.", + v_lport->port_id, i); + continue; + } + + spin_lock_irqsave(&xchg_mgr->hot_pool->xchg_hot_pool_lock, + hot_pool_lock_flags); + list_for_each_safe(node, next_node, + &xchg_mgr->hot_pool->ini_busylist) { + xchg = list_entry(node, struct unf_xchg_s, + list_xchg_entry); + + spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flag); + if (INI_IO_STATE_UPTASK & xchg->io_state && + (atomic_read(&xchg->ref_cnt) > 0)) { + UNF_SET_SCSI_CMND_RESULT(xchg, UNF_IO_SUCCESS); + up(&xchg->task_sema); + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_EVENT, UNF_MINOR, + "Wake up task command exchange(0x%p), Hot Pool Tag(0x%x).", + xchg, xchg->hot_pool_tag); + } + spin_unlock_irqrestore(&xchg->xchg_state_lock, + xchg_flag); + } + + spin_unlock_irqrestore(&xchg_mgr->hot_pool->xchg_hot_pool_lock, + hot_pool_lock_flags); + } +} + +void unf_cm_xchg_mgr_abort_io_by_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int v_sid, unsigned int v_did, + unsigned int v_extra_io_state) +{ + /* + * for target session: set ABORT + * 1. R_Port remove + * 2. Send PLOGI_ACC callback + * 3. RCVD PLOGI + * 4. RCVD LOGO + */ + UNF_CHECK_VALID(0x852, UNF_TRUE, v_lport, return); + + if (v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_io_xchg_abort) { + /* The SID/DID of the Xchg is in reverse direction in + * different phases. 
Therefore, the reverse direction + * needs to be considered + */ + v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_io_xchg_abort( + v_lport, + v_rport, + v_sid, v_did, + v_extra_io_state); + v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_io_xchg_abort( + v_lport, v_rport, + v_did, v_sid, + v_extra_io_state); + } +} + +void unf_cm_xchg_mgr_abort_sfs_by_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int v_sid, unsigned int v_did) +{ + UNF_CHECK_VALID(0x990, UNF_TRUE, v_lport, return); + + if (v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_sfs_xchg_abort) { + /* The SID/DID of the Xchg is in reverse direction in different + * phases, therefore, the reverse direction + * needs to be considered + */ + v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_sfs_xchg_abort(v_lport, + v_rport, + v_sid, + v_did); + v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_sfs_xchg_abort(v_lport, + v_rport, + v_did, + v_sid); + } +} + +void unf_cm_xchg_abort_by_lun(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned long long v_lun_id, + void *v_tm_xchg, + int v_abort_all_lun_flag) +{ + /* + * LUN Reset: set UP_ABORT tag, with: + * INI_Busy_list, IO_Wait_list, + * IO_Delay_list, IO_Delay_transfer_list + */ + void (*unf_xchg_abort_by_lun)(void*, void*, unsigned long long, + void*, int) = NULL; + + UNF_CHECK_VALID(0x853, UNF_TRUE, v_lport, return); + + unf_xchg_abort_by_lun = + v_lport->xchg_mgr_temp.pfn_unf_xchg_abort_by_lun; + if (unf_xchg_abort_by_lun) { + unf_xchg_abort_by_lun((void *)v_lport, (void *)v_rport, + v_lun_id, v_tm_xchg, + v_abort_all_lun_flag); + } +} + +void unf_cm_xchg_abort_by_session(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + void (*pfn_unf_xchg_abort_by_session)(void*, void*) = NULL; + + UNF_CHECK_VALID(0x853, UNF_TRUE, v_lport, return); + + pfn_unf_xchg_abort_by_session = + v_lport->xchg_mgr_temp.pfn_unf_xchg_abort_by_session; + if (pfn_unf_xchg_abort_by_session) { + pfn_unf_xchg_abort_by_session((void *)v_lport, + (void *)v_rport); + } +} + +void *unf_cm_get_free_xchg(void *v_lport, unsigned int v_xchg_type) +{ + struct unf_lport_s *lport = NULL; + struct unf_cm_xchg_mgr_template_s *xch_mgr_temp = NULL; + + UNF_CHECK_VALID(0x855, UNF_TRUE, unlikely(v_lport), return NULL); + + lport = (struct unf_lport_s *)v_lport; + xch_mgr_temp = &lport->xchg_mgr_temp; + + /* Find the corresponding Lport Xchg management template. 
*/ + UNF_CHECK_VALID(0x856, UNF_TRUE, + unlikely(xch_mgr_temp->pfn_unf_xchg_get_free_and_init), + return NULL); + + return xch_mgr_temp->pfn_unf_xchg_get_free_and_init(lport, v_xchg_type, + INVALID_VALUE16); +} + +void unf_cm_free_xchg(void *v_lport, void *v_xchg) +{ + struct unf_lport_s *lport = NULL; + struct unf_cm_xchg_mgr_template_s *xch_mgr_temp = NULL; + + UNF_CHECK_VALID(0x857, UNF_TRUE, unlikely(v_lport), return); + UNF_CHECK_VALID(0x858, UNF_TRUE, unlikely(v_xchg), return); + + lport = (struct unf_lport_s *)v_lport; + xch_mgr_temp = &lport->xchg_mgr_temp; + UNF_CHECK_VALID(0x859, UNF_TRUE, + unlikely(xch_mgr_temp->pfn_unf_xchg_release), + return); + + /* + * unf_cm_free_xchg --->>> unf_free_xchg + * --->>> unf_xchg_ref_dec --->>> unf_free_fcp_xchg + * --->>> unf_done_ini_xchg + */ + xch_mgr_temp->pfn_unf_xchg_release(v_lport, v_xchg); +} + +void *unf_cm_lookup_xchg_by_tag(void *v_lport, unsigned short v_hot_pool_tag) +{ + struct unf_lport_s *lport = NULL; + struct unf_cm_xchg_mgr_template_s *xch_mgr_temp = NULL; + + UNF_CHECK_VALID(0x860, UNF_TRUE, unlikely(v_lport), return NULL); + + /* Find the corresponding Lport Xchg management template */ + lport = (struct unf_lport_s *)v_lport; + xch_mgr_temp = &lport->xchg_mgr_temp; + + UNF_CHECK_VALID(0x861, UNF_TRUE, + unlikely(xch_mgr_temp->pfn_unf_look_up_xchg_by_tag), + return NULL); + + return xch_mgr_temp->pfn_unf_look_up_xchg_by_tag(v_lport, + v_hot_pool_tag); +} + +void *unf_cm_lookup_xchg_by_id(void *v_lport, unsigned short v_ox_id, + unsigned int v_oid) +{ + struct unf_lport_s *lport = NULL; + struct unf_cm_xchg_mgr_template_s *xch_mgr_temp = NULL; + + UNF_CHECK_VALID(0x862, UNF_TRUE, unlikely(v_lport), return NULL); + + lport = (struct unf_lport_s *)v_lport; + xch_mgr_temp = &lport->xchg_mgr_temp; + + /* Find the corresponding Lport Xchg management template */ + UNF_CHECK_VALID(0x863, UNF_TRUE, + unlikely(xch_mgr_temp->pfn_unf_look_up_xchg_by_id), + return NULL); + + return xch_mgr_temp->pfn_unf_look_up_xchg_by_id(v_lport, v_ox_id, + v_oid); +} + +struct unf_xchg_s *unf_cm_lookup_xchg_by_cmnd_sn( + void *v_lport, + unsigned long long v_command_sn, + unsigned int v_world_id) +{ + struct unf_lport_s *lport = NULL; + struct unf_cm_xchg_mgr_template_s *xch_mgr_temp = NULL; + struct unf_xchg_s *xchg = NULL; + + UNF_CHECK_VALID(0x864, UNF_TRUE, unlikely(v_lport), return NULL); + + lport = (struct unf_lport_s *)v_lport; + xch_mgr_temp = &lport->xchg_mgr_temp; + + UNF_CHECK_VALID( + 0x865, UNF_TRUE, + unlikely(xch_mgr_temp->pfn_unf_look_up_xchg_by_cmnd_sn), + return NULL); + + xchg = + (struct unf_xchg_s *)xch_mgr_temp->pfn_unf_look_up_xchg_by_cmnd_sn( + lport, v_command_sn, + v_world_id); + + return xchg; +} + +static void unf_free_all_rsp_pages(struct unf_xchg_mgr_s *v_xchg_mgr) +{ + unsigned int buff_index; + + UNF_CHECK_VALID(0x868, UNF_TRUE, v_xchg_mgr, return); + + if (v_xchg_mgr->rsp_buf_list.buflist) { + for (buff_index = 0; buff_index < + v_xchg_mgr->rsp_buf_list.buf_num; + buff_index++) { + if (v_xchg_mgr->rsp_buf_list.buflist[buff_index].vaddr) { + dma_free_coherent( + &v_xchg_mgr->hot_pool->lport->low_level_func.dev->dev, + v_xchg_mgr->rsp_buf_list.buf_size, + v_xchg_mgr->rsp_buf_list.buflist[buff_index].vaddr, + v_xchg_mgr->rsp_buf_list.buflist[buff_index].paddr); + v_xchg_mgr->rsp_buf_list.buflist[buff_index].vaddr = NULL; + } + } + + kfree(v_xchg_mgr->rsp_buf_list.buflist); + v_xchg_mgr->rsp_buf_list.buflist = NULL; + } +} + +static unsigned int unf_init_xchg(struct unf_lport_s *v_lport, + struct unf_xchg_mgr_s 
*v_xchg_mgr, + unsigned int v_xchg_sum, + unsigned int v_sfs_sum) +{ + struct unf_xchg_s *xchg_mem = NULL; + union unf_sfs_u *sfs_mm_start = NULL; + dma_addr_t sfs_dma_addr; + struct unf_xchg_s *xchg = NULL; + struct unf_xchg_free_pool_s *free_pool = NULL; + unsigned int rsp_iu_nums_per_page = 0; + unsigned int rsp_iu_size = 0; + unsigned long flags = 0; + unsigned int xchg_sum = 0; + unsigned int i = 0; + unsigned int rsp_iu_loop = 0; + unsigned int buf_num; + unsigned int buf_size; + unsigned int curbuf_idx = 0; + void *page_addr; + dma_addr_t phy_addr; + + UNF_CHECK_VALID(0x871, UNF_TRUE, v_sfs_sum <= v_xchg_sum, + return UNF_RETURN_ERROR); + + free_pool = &v_xchg_mgr->free_pool; + xchg_sum = v_xchg_sum; + xchg_mem = v_xchg_mgr->fcp_mm_start; + xchg = xchg_mem; + + sfs_mm_start = (union unf_sfs_u *)v_xchg_mgr->sfs_mm_start; + sfs_dma_addr = v_xchg_mgr->sfs_phy_addr; + /* 1. Allocate the SFS UNION memory to each SFS XCHG + * and mount the SFS XCHG to the corresponding FREE linked list + */ + free_pool->total_sfs_xchg = 0; + free_pool->sfs_xchg_sum = v_sfs_sum; + for (i = 0; i < v_sfs_sum; i++) { + INIT_LIST_HEAD(&xchg->list_xchg_entry); + INIT_LIST_HEAD(&xchg->list_esgls); + spin_lock_init(&xchg->xchg_state_lock); + sema_init(&xchg->task_sema, 0); + sema_init(&xchg->echo_info.echo_sync_sema, 0); + + spin_lock_irqsave(&free_pool->xchg_free_pool_lock, flags); + xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr = sfs_mm_start; + xchg->fcp_sfs_union.sfs_entry.sfs_buff_phy_addr = sfs_dma_addr; + xchg->fcp_sfs_union.sfs_entry.sfs_buff_len = + sizeof(*sfs_mm_start); + list_add_tail(&xchg->list_xchg_entry, + &free_pool->list_sfs_xchg_list); + free_pool->total_sfs_xchg++; + spin_unlock_irqrestore(&free_pool->xchg_free_pool_lock, flags); + sfs_mm_start++; + sfs_dma_addr = sfs_dma_addr + sizeof(union unf_sfs_u); + xchg++; + } + + /* + * 2. Allocate RSP IU memory for each IO XCHG and mount IO + * XCHG to the corresponding FREE linked list + * The memory size of each RSP IU is rsp_iu_size. + */ + rsp_iu_size = (UNF_FCPRSP_CTL_LEN + UNF_MAX_RSP_INFO_LEN + + UNF_SCSI_SENSE_DATA_LEN); + + buf_size = BUF_LIST_PAGE_SIZE; + if ((xchg_sum - v_sfs_sum) * rsp_iu_size < BUF_LIST_PAGE_SIZE) + buf_size = (xchg_sum - v_sfs_sum) * rsp_iu_size; + + rsp_iu_nums_per_page = buf_size / rsp_iu_size; + buf_num = (xchg_sum - v_sfs_sum) % rsp_iu_nums_per_page ? 
+ (xchg_sum - v_sfs_sum) / rsp_iu_nums_per_page + 1 : + (xchg_sum - v_sfs_sum) / rsp_iu_nums_per_page; + + v_xchg_mgr->rsp_buf_list.buflist = + (struct buff_list_s *)kmalloc( + buf_num * sizeof(struct buff_list_s), + GFP_KERNEL); + v_xchg_mgr->rsp_buf_list.buf_num = buf_num; + v_xchg_mgr->rsp_buf_list.buf_size = buf_size; + + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) buff num 0x%x buff size 0x%x", + v_lport->port_id, buf_num, + v_xchg_mgr->rsp_buf_list.buf_size); + + if (!v_xchg_mgr->rsp_buf_list.buflist) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Allocate BigSfs pool buf list failed out of memory"); + goto free_buff; + } + memset(v_xchg_mgr->rsp_buf_list.buflist, 0, + buf_num * sizeof(struct buff_list_s)); + + free_pool->total_fcp_xchg = 0; + for (i = 0, curbuf_idx = 0; curbuf_idx < buf_num; curbuf_idx++) { + page_addr = dma_alloc_coherent( + &v_lport->low_level_func.dev->dev, + v_xchg_mgr->rsp_buf_list.buf_size, + &phy_addr, GFP_KERNEL); + if (!page_addr) + goto free_buff; + + memset(page_addr, 0, v_xchg_mgr->rsp_buf_list.buf_size); + v_xchg_mgr->rsp_buf_list.buflist[curbuf_idx].vaddr = page_addr; + v_xchg_mgr->rsp_buf_list.buflist[curbuf_idx].paddr = phy_addr; + + for (rsp_iu_loop = 0; + (rsp_iu_loop < rsp_iu_nums_per_page && + i < xchg_sum - v_sfs_sum); rsp_iu_loop++) { + INIT_LIST_HEAD(&xchg->list_xchg_entry); + + INIT_LIST_HEAD(&xchg->list_esgls); + spin_lock_init(&xchg->xchg_state_lock); + sema_init(&xchg->task_sema, 0); + sema_init(&xchg->echo_info.echo_sync_sema, 0); + + /* alloc dma buffer for fcp_rsp_iu */ + spin_lock_irqsave(&free_pool->xchg_free_pool_lock, + flags); + xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu = + (struct unf_fcprsp_iu_s *)page_addr; + xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu_phy_addr = + phy_addr; + list_add_tail(&xchg->list_xchg_entry, + &free_pool->list_free_xchg_list); + free_pool->total_fcp_xchg++; + spin_unlock_irqrestore(&free_pool->xchg_free_pool_lock, + flags); + + page_addr += rsp_iu_size; + phy_addr += rsp_iu_size; + i++; + xchg++; + } + } + + free_pool->fcp_xchg_sum = free_pool->total_fcp_xchg; + + return RETURN_OK; +free_buff: + unf_free_all_rsp_pages(v_xchg_mgr); + return UNF_RETURN_ERROR; +} + +static unsigned int unf_get_xchg_config_sum(struct unf_lport_s *v_lport, + unsigned int *v_xchg_sum) +{ + struct unf_lport_cfg_item_s *lport_cfg_items = NULL; + + lport_cfg_items = &v_lport->low_level_func.lport_cfg_items; + + /* It has been checked at the bottom layer. + * Don't need to check it again. 
+ */ + *v_xchg_sum = lport_cfg_items->max_sfs_xchg + lport_cfg_items->max_io; + if ((*v_xchg_sum / UNF_EXCHG_MGR_NUM) == 0 || + lport_cfg_items->max_sfs_xchg / UNF_EXCHG_MGR_NUM == 0) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) Xchgsum(%u) or SfsXchg(%u) is less than ExchangeMgrNum(%u).", + v_lport->port_id, *v_xchg_sum, + lport_cfg_items->max_sfs_xchg, + UNF_EXCHG_MGR_NUM); + return UNF_RETURN_ERROR; + } + + if (*v_xchg_sum > (INVALID_VALUE16 - 1)) { + /* If the format of ox_id/rx_id is exceeded, + * this function is not supported + */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) Exchange num(0x%x) is Too Big.", + v_lport->port_id, *v_xchg_sum); + + return UNF_RETURN_ERROR; + } + + return RETURN_OK; +} + +static void unf_xchg_cancel_timer(void *v_xchg) +{ + struct unf_xchg_s *xchg = NULL; + int need_dec_xchg_ref = UNF_FALSE; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x874, UNF_TRUE, v_xchg, return); + xchg = (struct unf_xchg_s *)v_xchg; + + spin_lock_irqsave(&xchg->xchg_state_lock, flag); + if (cancel_delayed_work(&xchg->timeout_work)) + need_dec_xchg_ref = UNF_TRUE; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); + + if (need_dec_xchg_ref == UNF_TRUE) + unf_xchg_ref_dec(v_xchg, XCHG_CANCEL_TIMER); +} + +void unf_show_all_xchg(struct unf_lport_s *v_lport, + struct unf_xchg_mgr_s *v_xchg_mgr) +{ + struct unf_lport_s *lport = NULL; + struct unf_xchg_mgr_s *xchg_mgr = NULL; + struct unf_xchg_s *xchg = NULL; + struct list_head *xchg_node = NULL; + struct list_head *next_xchg_node = NULL; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x879, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x880, UNF_TRUE, v_xchg_mgr, return); + + UNF_REFERNCE_VAR(lport); + UNF_REFERNCE_VAR(xchg); + + xchg_mgr = v_xchg_mgr; + lport = v_lport; + + /* hot Xchg */ + spin_lock_irqsave(&xchg_mgr->hot_pool->xchg_hot_pool_lock, flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_WARN, + "INI busy :"); + list_for_each_safe(xchg_node, next_xchg_node, + &xchg_mgr->hot_pool->ini_busylist) { + xchg = list_entry(xchg_node, struct unf_xchg_s, + list_xchg_entry); + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR, + "0x%p---0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----%llu.", + xchg, + (unsigned int)xchg->hot_pool_tag, + (unsigned int)xchg->xchg_type, + (unsigned int)xchg->ox_id, + (unsigned int)xchg->rx_id, + (unsigned int)xchg->sid, + (unsigned int)xchg->did, + atomic_read(&xchg->ref_cnt), + (unsigned int)xchg->io_state, + xchg->alloc_jif); + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, + UNF_WARN, "SFS :"); + list_for_each_safe(xchg_node, next_xchg_node, + &xchg_mgr->hot_pool->sfs_busylist) { + xchg = list_entry(xchg_node, struct unf_xchg_s, + list_xchg_entry); + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_WARN, + "0x%p---0x%x---0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----%llu.", + xchg, + xchg->cmnd_code, + (unsigned int)xchg->hot_pool_tag, + (unsigned int)xchg->xchg_type, + (unsigned int)xchg->ox_id, + (unsigned int)xchg->rx_id, + (unsigned int)xchg->sid, + (unsigned int)xchg->did, + atomic_read(&xchg->ref_cnt), + (unsigned int)xchg->io_state, + xchg->alloc_jif); + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_WARN, + "Destroy list."); + list_for_each_safe(xchg_node, next_xchg_node, + &xchg_mgr->hot_pool->list_destroy_xchg) { + xchg = list_entry(xchg_node, struct unf_xchg_s, + list_xchg_entry); + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, 
UNF_WARN, + "0x%p---0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----%llu.", + xchg, + (unsigned int)xchg->hot_pool_tag, + (unsigned int)xchg->xchg_type, + (unsigned int)xchg->ox_id, + (unsigned int)xchg->rx_id, + (unsigned int)xchg->sid, + (unsigned int)xchg->did, + atomic_read(&xchg->ref_cnt), + (unsigned int)xchg->io_state, + xchg->alloc_jif); + } + spin_unlock_irqrestore(&xchg_mgr->hot_pool->xchg_hot_pool_lock, flags); + + UNF_REFERNCE_VAR(xchg); + UNF_REFERNCE_VAR(lport); +} + +static void unf_delay_work_del_syn(struct unf_xchg_s *v_xchg) +{ + struct unf_xchg_s *xchg = NULL; + + UNF_CHECK_VALID(0x884, UNF_TRUE, v_xchg, return); + + xchg = v_xchg; + + /* synchronous release timer */ + if (!cancel_delayed_work_sync(&xchg->timeout_work)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Exchange(0x%p), State(0x%x) can't delete work timer, timer is running or no timer.", + xchg, xchg->io_state); + } else { + /* The reference count cannot be directly subtracted. + * This prevents the XCHG from being moved to the + * Free linked list when the card is unloaded. + */ + unf_cm_free_xchg(xchg->lport, xchg); + } +} + +static void unf_free_lport_sfs_xchg(struct unf_xchg_mgr_s *v_xchg_mgr, + int v_done_ini_flag) +{ + struct list_head *list = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned long hot_pool_lock_flags = 0; + + UNF_REFERNCE_VAR(v_done_ini_flag); + UNF_CHECK_VALID(0x887, UNF_TRUE, v_xchg_mgr, return); + UNF_CHECK_VALID(0x888, UNF_TRUE, v_xchg_mgr->hot_pool, return); + + spin_lock_irqsave(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock, + hot_pool_lock_flags); + while (!list_empty(&v_xchg_mgr->hot_pool->sfs_busylist)) { + list = (&v_xchg_mgr->hot_pool->sfs_busylist)->next; + list_del_init(list); + + /* Prevent the xchg of the sfs from being accessed repeatedly. + * The xchg is first mounted to the destroy linked list. + */ + list_add_tail(list, &v_xchg_mgr->hot_pool->list_destroy_xchg); + + xchg = list_entry(list, struct unf_xchg_s, list_xchg_entry); + spin_unlock_irqrestore( + &v_xchg_mgr->hot_pool->xchg_hot_pool_lock, + hot_pool_lock_flags); + unf_delay_work_del_syn(xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Free SFS Exchange(0x%p), State(0x%x), Reference count(%d), Start time(%llu).", + xchg, xchg->io_state, atomic_read(&xchg->ref_cnt), + xchg->alloc_jif); + + unf_cm_free_xchg(xchg->lport, xchg); + + spin_lock_irqsave(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock, + hot_pool_lock_flags); + } + spin_unlock_irqrestore(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock, + hot_pool_lock_flags); +} + +static void unf_free_lport_destroy_xchg(struct unf_xchg_mgr_s *v_xchg_mgr) +{ +#define UNF_WAIT_DESTROY_EMPTY_STEP_MS 1000 +#define UNF_WAIT_IO_STATE_TGT_FRONT_MS (10 * 1000) + + struct unf_xchg_s *xchg = NULL; + struct list_head *next_xchg_node = NULL; + unsigned long hot_pool_lock_flags = 0; + unsigned long xchg_flag = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_xchg_mgr, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_xchg_mgr->hot_pool, + return); + + /* In this case, the timer on the destroy linked list is deleted. + * You only need to check whether the timer is released + * at the end of the tgt. 
+ */ + spin_lock_irqsave(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock, + hot_pool_lock_flags); + while (!list_empty(&v_xchg_mgr->hot_pool->list_destroy_xchg)) { + next_xchg_node = + (&v_xchg_mgr->hot_pool->list_destroy_xchg)->next; + xchg = list_entry(next_xchg_node, struct unf_xchg_s, + list_xchg_entry); + + spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Free Exchange(0x%p), Type(0x%x), State(0x%x), Reference count(%d), Start time(%llu)", + xchg, xchg->xchg_type, xchg->io_state, + atomic_read(&xchg->ref_cnt), + xchg->alloc_jif); + + spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flag); + spin_unlock_irqrestore( + &v_xchg_mgr->hot_pool->xchg_hot_pool_lock, + hot_pool_lock_flags); + + /* This interface can be invoked to ensure that + * the timer is successfully canceled + * or wait until the timer execution is complete + */ + unf_delay_work_del_syn(xchg); + + /* + * If the timer is canceled successfully, delete Xchg + * If the timer has burst, the Xchg may have been released, + * In this case, deleting the Xchg will be failed + */ + unf_cm_free_xchg(xchg->lport, xchg); + + spin_lock_irqsave(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock, + hot_pool_lock_flags); + }; + + spin_unlock_irqrestore(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock, + hot_pool_lock_flags); +} + +static unsigned int unf_free_lport_xchg(struct unf_lport_s *v_lport, + struct unf_xchg_mgr_s *v_xchg_mgr) +{ +#define UNF_OS_WAITIO_TIMEOUT (10 * 1000) + + unsigned long free_pool_lock_flags = 0; + int wait = UNF_FALSE; + unsigned int total_xchg = 0; + unsigned int total_xchg_sum = 0; + unsigned int ret = RETURN_OK; + unsigned long long timeout = 0; + + struct completion xchg_mgr_completion = + COMPLETION_INITIALIZER(xchg_mgr_completion); + + UNF_CHECK_VALID(0x881, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x882, UNF_TRUE, v_xchg_mgr, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x883, UNF_TRUE, v_xchg_mgr->hot_pool, + return UNF_RETURN_ERROR); + UNF_REFERNCE_VAR(v_lport); + + unf_free_lport_sfs_xchg(v_xchg_mgr, UNF_FALSE); + + /* free INI Mode exchanges belong to L_Port */ + unf_free_lport_ini_xchg(v_xchg_mgr, UNF_FALSE); + + spin_lock_irqsave(&v_xchg_mgr->free_pool.xchg_free_pool_lock, + free_pool_lock_flags); + total_xchg = v_xchg_mgr->free_pool.total_fcp_xchg + + v_xchg_mgr->free_pool.total_sfs_xchg; + total_xchg_sum = v_xchg_mgr->free_pool.fcp_xchg_sum + + v_xchg_mgr->free_pool.sfs_xchg_sum; + if (total_xchg != total_xchg_sum) { + v_xchg_mgr->free_pool.xchg_mgr_completion = + &xchg_mgr_completion; + wait = UNF_TRUE; + } + spin_unlock_irqrestore(&v_xchg_mgr->free_pool.xchg_free_pool_lock, + free_pool_lock_flags); + + if (wait == UNF_TRUE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) begin to wait for exchange manager completion(%ld) (0x%x:0x%x)", + v_lport->port_id, jiffies, total_xchg, + total_xchg_sum); + + unf_show_all_xchg(v_lport, v_xchg_mgr); + + timeout = wait_for_completion_timeout( + v_xchg_mgr->free_pool.xchg_mgr_completion, + msecs_to_jiffies(UNF_OS_WAITIO_TIMEOUT)); + if (timeout == 0) + unf_free_lport_destroy_xchg(v_xchg_mgr); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) wait for exchange manager completion end", + v_lport->port_id); + + spin_lock_irqsave(&v_xchg_mgr->free_pool.xchg_free_pool_lock, + free_pool_lock_flags); + v_xchg_mgr->free_pool.xchg_mgr_completion = NULL; + spin_unlock_irqrestore( + 
&v_xchg_mgr->free_pool.xchg_free_pool_lock, + free_pool_lock_flags); + } + + return ret; +} + +void unf_free_lport_all_xchg(struct unf_lport_s *v_lport) +{ + struct unf_xchg_mgr_s *xchg_mgr; + unsigned int i; + + UNF_CHECK_VALID(0x881, UNF_TRUE, v_lport, return); + UNF_REFERNCE_VAR(v_lport); + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + xchg_mgr = unf_get_xchg_mgr_by_lport(v_lport, i); + if (unlikely(!xchg_mgr)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) hot pool is NULL", + v_lport->port_id); + + continue; + } + unf_free_lport_sfs_xchg(xchg_mgr, UNF_FALSE); + + /* free INI Mode exchanges belong to L_Port */ + unf_free_lport_ini_xchg(xchg_mgr, UNF_FALSE); + + unf_free_lport_destroy_xchg(xchg_mgr); + } +} + +void unf_free_lport_ini_xchg(struct unf_xchg_mgr_s *v_xchg_mgr, + int v_done_ini_flag) +{ + /* + * 1. L_Port destroy + * 2. AC power down + */ + struct list_head *list = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned long hot_pool_lock_flags = 0; + unsigned int up_status = 0; + + UNF_REFERNCE_VAR(v_done_ini_flag); + UNF_CHECK_VALID(0x889, UNF_TRUE, v_xchg_mgr, return); + UNF_CHECK_VALID(0x890, UNF_TRUE, v_xchg_mgr->hot_pool, return); + + spin_lock_irqsave(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock, + hot_pool_lock_flags); + while (!list_empty(&v_xchg_mgr->hot_pool->ini_busylist)) { + /* for each INI busy_list (exchange) node */ + list = (&v_xchg_mgr->hot_pool->ini_busylist)->next; + + /* Put exchange node to destroy_list, prevent done repeatly */ + list_del_init(list); + list_add_tail(list, &v_xchg_mgr->hot_pool->list_destroy_xchg); + xchg = list_entry(list, struct unf_xchg_s, list_xchg_entry); + if (atomic_read(&xchg->ref_cnt) <= 0) + continue; + spin_unlock_irqrestore( + &v_xchg_mgr->hot_pool->xchg_hot_pool_lock, + hot_pool_lock_flags); + unf_delay_work_del_syn(xchg); + + /* In the case of INI done, the command should be set to fail + * to prevent data inconsistency caused by the return of OK + */ + up_status = unf_get_uplevel_cmnd_errcode( + xchg->scsi_cmnd_info.err_code_table, + xchg->scsi_cmnd_info.err_code_table_cout, + UNF_IO_PORT_LOGOUT); + + if (xchg->io_state & INI_IO_STATE_UPABORT) { + /* + * About L_Port destroy or AC power down: + * UP_ABORT ---to--->>> ABORT_Port_Removing + */ + up_status = UNF_IO_ABORT_PORT_REMOVING; + } + + xchg->scsi_cmnd_info.result = up_status; + up(&xchg->task_sema); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Free INI exchange(0x%p) state(0x%x) reference count(%d) start time(%llu)", + xchg, xchg->io_state, atomic_read(&xchg->ref_cnt), + xchg->alloc_jif); + + unf_cm_free_xchg(xchg->lport, xchg); + + /* go to next INI busy_list (exchange) node */ + spin_lock_irqsave(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock, + hot_pool_lock_flags); + } + spin_unlock_irqrestore(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock, + hot_pool_lock_flags); +} + +static void unf_free_all_big_sfs(struct unf_xchg_mgr_s *v_xchg_mgr) +{ + struct unf_xchg_mgr_s *xchg_mgr = v_xchg_mgr; + struct unf_big_sfs_s *big_sfs = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flag = 0; + unsigned int buff_index; + + UNF_CHECK_VALID(0x891, UNF_TRUE, xchg_mgr, return); + + /* Release the free resources in the busy state */ + spin_lock_irqsave(&xchg_mgr->st_big_sfs_pool.big_sfs_pool_lock, flag); + list_for_each_safe(node, next_node, + &xchg_mgr->st_big_sfs_pool.list_busy_pool) { + list_del(node); + list_add_tail(node, &xchg_mgr->st_big_sfs_pool.list_free_pool); + } + + 
list_for_each_safe(node, next_node, + &xchg_mgr->st_big_sfs_pool.list_free_pool) { + list_del(node); + big_sfs = list_entry(node, struct unf_big_sfs_s, + entry_big_sfs); + if (big_sfs->vaddr) + big_sfs->vaddr = NULL; + } + spin_unlock_irqrestore(&xchg_mgr->st_big_sfs_pool.big_sfs_pool_lock, + flag); + + if (xchg_mgr->big_sfs_buf_list.buflist) { + for (buff_index = 0; + buff_index < xchg_mgr->big_sfs_buf_list.buf_num; + buff_index++) { + if (xchg_mgr->big_sfs_buf_list.buflist[buff_index].vaddr) { + kfree(xchg_mgr->big_sfs_buf_list.buflist[buff_index].vaddr); + xchg_mgr->big_sfs_buf_list.buflist[buff_index].vaddr = NULL; + } + } + + kfree(xchg_mgr->big_sfs_buf_list.buflist); + xchg_mgr->big_sfs_buf_list.buflist = NULL; + } +} + +static void unf_free_big_sfs_pool(struct unf_xchg_mgr_s *v_xchg_mgr) +{ + UNF_CHECK_VALID(0x892, UNF_TRUE, v_xchg_mgr, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "Free Big SFS Pool, Count(0x%x).", + v_xchg_mgr->st_big_sfs_pool.free_count); + + unf_free_all_big_sfs(v_xchg_mgr); + v_xchg_mgr->st_big_sfs_pool.free_count = 0; + + if (v_xchg_mgr->st_big_sfs_pool.big_sfs_pool) { + vfree(v_xchg_mgr->st_big_sfs_pool.big_sfs_pool); + v_xchg_mgr->st_big_sfs_pool.big_sfs_pool = NULL; + } +} + +static void unf_free_xchg_mgr_mem(struct unf_lport_s *v_lport, + struct unf_xchg_mgr_s *v_xchg_mgr) +{ + struct unf_xchg_mgr_s *xchg_mgr = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int i = 0; + unsigned int xchg_sum = 0; + struct unf_xchg_free_pool_s *free_pool = NULL; + + UNF_CHECK_VALID(0x893, UNF_TRUE, v_xchg_mgr, return); + + xchg_mgr = v_xchg_mgr; + + /* Release the reserved Rsp IU Page */ + unf_free_all_rsp_pages(xchg_mgr); + + unf_free_big_sfs_pool(xchg_mgr); + + /* The sfs is released first, and the XchgMgr is allocated + * by the get free page. + * Therefore, the XchgMgr is compared with the '0' + */ + if (xchg_mgr->sfs_mm_start != 0) { + dma_free_coherent(&v_lport->low_level_func.dev->dev, + xchg_mgr->sfs_mem_size, + xchg_mgr->sfs_mm_start, + xchg_mgr->sfs_phy_addr); + xchg_mgr->sfs_mm_start = 0; + } + + /* Release Xchg first */ + if (xchg_mgr->fcp_mm_start) { + unf_get_xchg_config_sum(v_lport, &xchg_sum); + xchg_sum = xchg_sum / UNF_EXCHG_MGR_NUM; + + xchg = xchg_mgr->fcp_mm_start; + for (i = 0; i < xchg_sum; i++) { + if (!xchg) + break; + xchg++; + } + + vfree(xchg_mgr->fcp_mm_start); + xchg_mgr->fcp_mm_start = NULL; + } + + /* release the hot pool */ + if (xchg_mgr->hot_pool) { + vfree(xchg_mgr->hot_pool); + xchg_mgr->hot_pool = NULL; + } + + free_pool = &xchg_mgr->free_pool; + + vfree(xchg_mgr); + + UNF_REFERNCE_VAR(xchg_mgr); + UNF_REFERNCE_VAR(free_pool); +} + +static void unf_free_xchg_mgr(struct unf_lport_s *v_lport, + struct unf_xchg_mgr_s *v_xchg_mgr) +{ + unsigned long flags = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x894, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x895, UNF_TRUE, v_xchg_mgr, return); + + /* 1. At first, free exchanges for this Exch_Mgr */ + ret = unf_free_lport_xchg(v_lport, v_xchg_mgr); + + /* 2. Delete this Exch_Mgr entry */ + spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags); + list_del_init(&v_xchg_mgr->xchg_mgr_entry); + spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags); + + /* 3. 
free Exch_Mgr memory if necessary */ + if (ret == RETURN_OK) { + /* free memory directly */ + unf_free_xchg_mgr_mem(v_lport, v_xchg_mgr); + } else { + /* Add it to Dirty list */ + spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags); + list_add_tail(&v_xchg_mgr->xchg_mgr_entry, + &v_lport->list_dirty_xchg_mgr_head); + spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags); + + /* Mark dirty flag */ + unf_cmmark_dirty_mem(v_lport, + UNF_LPORT_DIRTY_FLAG_XCHGMGR_DIRTY); + } +} + +void unf_free_all_xchg_mgr(struct unf_lport_s *v_lport) +{ + struct unf_xchg_mgr_s *xchg_mgr = NULL; + unsigned long flags = 0; + unsigned int i = 0; + + UNF_CHECK_VALID(0x896, UNF_TRUE, v_lport, return); + + /* for each L_Port->Exch_Mgr_List */ + spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags); + while (!list_empty(&v_lport->list_xchg_mgr_head)) { + spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags); + + xchg_mgr = unf_get_xchg_mgr_by_lport(v_lport, i); + unf_free_xchg_mgr(v_lport, xchg_mgr); + if (i < UNF_EXCHG_MGR_NUM) + v_lport->p_xchg_mgr[i] = NULL; + + i++; + /* go to next */ + spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags); + } + spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags); + + v_lport->destroy_step = UNF_LPORT_DESTROY_STEP_4_DESTROY_EXCH_MGR; +} + +static unsigned int unf_init_xchg_mgr(struct unf_xchg_mgr_s *v_xchg_mgr) +{ + struct unf_xchg_mgr_s *xchg_mgr = NULL; + + UNF_CHECK_VALID(0x897, UNF_TRUE, v_xchg_mgr, return UNF_RETURN_ERROR); + xchg_mgr = v_xchg_mgr; + memset(xchg_mgr, 0, sizeof(struct unf_xchg_mgr_s)); + + INIT_LIST_HEAD(&xchg_mgr->xchg_mgr_entry); + xchg_mgr->mgr_type = UNF_XCHG_MGR_FC; + xchg_mgr->min_xid = UNF_XCHG_MIN_XID; + xchg_mgr->max_xid = UNF_XCHG_MAX_XID; + xchg_mgr->fcp_mm_start = NULL; + xchg_mgr->mem_size = sizeof(struct unf_xchg_mgr_s); + return RETURN_OK; +} + +static unsigned int unf_init_xchg_mgr_free_pool( + struct unf_xchg_mgr_s *v_xchg_mgr) +{ + struct unf_xchg_free_pool_s *free_pool = NULL; + struct unf_xchg_mgr_s *xchg_mgr = NULL; + + UNF_CHECK_VALID(0x898, UNF_TRUE, v_xchg_mgr, return UNF_RETURN_ERROR); + xchg_mgr = v_xchg_mgr; + + free_pool = &xchg_mgr->free_pool; + INIT_LIST_HEAD(&free_pool->list_free_xchg_list); + INIT_LIST_HEAD(&free_pool->list_sfs_xchg_list); + spin_lock_init(&free_pool->xchg_free_pool_lock); + free_pool->fcp_xchg_sum = 0; + free_pool->xchg_mgr_completion = NULL; + + return RETURN_OK; +} + +static unsigned int unf_init_xchg_hot_pool( + struct unf_lport_s *v_lport, + struct unf_xchg_hot_pool_s *v_hot_pool, + unsigned int v_xchg_sum) +{ + struct unf_xchg_hot_pool_s *hot_pool = NULL; + + UNF_CHECK_VALID(0x899, UNF_TRUE, v_hot_pool, return UNF_RETURN_ERROR); + hot_pool = v_hot_pool; + + INIT_LIST_HEAD(&hot_pool->sfs_busylist); + INIT_LIST_HEAD(&hot_pool->ini_busylist); + spin_lock_init(&hot_pool->xchg_hot_pool_lock); + INIT_LIST_HEAD(&hot_pool->list_destroy_xchg); + hot_pool->total_xchges = 0; + hot_pool->total_res_cnt = 0; + hot_pool->wait_state = UNF_FALSE; + hot_pool->lport = v_lport; + + /* Slab Pool Index */ + hot_pool->slab_next_index = 0; + UNF_TOU16_CHECK(hot_pool->slab_total_sum, v_xchg_sum, + return UNF_RETURN_ERROR); + + return RETURN_OK; +} + +static unsigned int unf_alloc_and_init_big_sfs_pool( + struct unf_lport_s *v_lport, + struct unf_xchg_mgr_s *v_xchg_mgr) +{ + unsigned int i = 0; + unsigned int size = 0; + unsigned int align_size = 0; + unsigned int npiv_cnt = 0; + struct unf_big_sfs_pool_s *big_sfs_pool = NULL; + struct unf_big_sfs_s *big_sfs_buf = NULL; + unsigned int buf_total_size; + unsigned int buf_num; + 
unsigned int buf_cnt_perhugebuf; + unsigned int alloc_idx; + unsigned int curbuf_idx = 0; + unsigned int curbuf_offset = 0; + + UNF_CHECK_VALID(0x900, UNF_TRUE, v_xchg_mgr, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x901, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + big_sfs_pool = &v_xchg_mgr->st_big_sfs_pool; + + INIT_LIST_HEAD(&big_sfs_pool->list_free_pool); + INIT_LIST_HEAD(&big_sfs_pool->list_busy_pool); + spin_lock_init(&big_sfs_pool->big_sfs_pool_lock); + npiv_cnt = v_lport->low_level_func.support_max_npiv_num; + + /* + * The value*6 indicates GID_PT/GID_FT, RSCN, and ECHO + * Another command is received when a command is being responded + * A maximum of 20 resources are reserved for the RSCN. + * During the test, multiple rscn are found. As a result, + * the resources are insufficient and the disc fails. + */ + big_sfs_pool->free_count = (npiv_cnt + 1) * 6 + 20; + big_sfs_buf = (struct unf_big_sfs_s *)vmalloc( + big_sfs_pool->free_count + * sizeof(struct unf_big_sfs_s)); + if (!big_sfs_buf) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Allocate Big SFS buf fail."); + + return UNF_RETURN_ERROR; + } + memset(big_sfs_buf, 0, big_sfs_pool->free_count * + sizeof(struct unf_big_sfs_s)); + v_xchg_mgr->mem_size += + (unsigned int) + (big_sfs_pool->free_count * sizeof(struct unf_big_sfs_s)); + big_sfs_pool->big_sfs_pool = (void *)big_sfs_buf; + + /* + * Use the larger value of sizeof (struct unf_gif_acc_pld_s) and + * sizeof (struct unf_rscn_pld_s) to avoid the icp error.Therefore, + * the value is directly assigned instead of being compared. + */ + size = sizeof(struct unf_gif_acc_pld_s); + align_size = ALIGN(size, PAGE_SIZE); + + buf_total_size = align_size * big_sfs_pool->free_count; + + v_xchg_mgr->big_sfs_buf_list.buf_size = + buf_total_size > BUF_LIST_PAGE_SIZE ? + BUF_LIST_PAGE_SIZE : buf_total_size; + buf_cnt_perhugebuf = + v_xchg_mgr->big_sfs_buf_list.buf_size / align_size; + buf_num = + big_sfs_pool->free_count % buf_cnt_perhugebuf ? 
+ big_sfs_pool->free_count / buf_cnt_perhugebuf + 1 : + big_sfs_pool->free_count / buf_cnt_perhugebuf; + + v_xchg_mgr->big_sfs_buf_list.buflist = + (struct buff_list_s *)kmalloc( + buf_num * sizeof(struct buff_list_s), + GFP_KERNEL); + v_xchg_mgr->big_sfs_buf_list.buf_num = buf_num; + + if (!v_xchg_mgr->big_sfs_buf_list.buflist) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Allocate BigSfs pool buf list failed out of memory"); + goto free_buff; + } + memset(v_xchg_mgr->big_sfs_buf_list.buflist, 0, buf_num * + sizeof(struct buff_list_s)); + for (alloc_idx = 0; alloc_idx < buf_num; alloc_idx++) { + v_xchg_mgr->big_sfs_buf_list.buflist[alloc_idx].vaddr = + kmalloc(v_xchg_mgr->big_sfs_buf_list.buf_size, + GFP_ATOMIC); + if (!v_xchg_mgr->big_sfs_buf_list.buflist[alloc_idx].vaddr) + goto free_buff; + + memset(v_xchg_mgr->big_sfs_buf_list.buflist[alloc_idx].vaddr, + 0, v_xchg_mgr->big_sfs_buf_list.buf_size); + } + + for (i = 0; i < big_sfs_pool->free_count; i++) { + if ((i != 0) && !(i % buf_cnt_perhugebuf)) + curbuf_idx++; + + curbuf_offset = align_size * (i % buf_cnt_perhugebuf); + big_sfs_buf->vaddr = + v_xchg_mgr->big_sfs_buf_list.buflist[curbuf_idx].vaddr + + curbuf_offset; + big_sfs_buf->size = size; + v_xchg_mgr->mem_size += size; + list_add_tail(&big_sfs_buf->entry_big_sfs, + &big_sfs_pool->list_free_pool); + big_sfs_buf++; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[EVENT]Allocate BigSfs pool size:%d,uiAlignSize:%d,buf_num:%d,buf_size:%d", + size, align_size, v_xchg_mgr->big_sfs_buf_list.buf_num, + v_xchg_mgr->big_sfs_buf_list.buf_size); + return RETURN_OK; +free_buff: + unf_free_all_big_sfs(v_xchg_mgr); + vfree(big_sfs_buf); + big_sfs_pool->big_sfs_pool = NULL; + return UNF_RETURN_ERROR; +} + +/* + * Function Name : unf_free_one_big_sfs + * Function Description: Put the big sfs memory in xchg back to bigsfspool + * Input Parameters : struct unf_xchg_s * v_xchg + * Output Parameters : N/A + * Return Type : static void + */ +static void unf_free_one_big_sfs(struct unf_xchg_s *v_xchg) +{ + unsigned long flag = 0; + struct unf_xchg_mgr_s *xchg_mgr = NULL; + + UNF_CHECK_VALID(0x902, UNF_TRUE, v_xchg, return); + xchg_mgr = v_xchg->xchg_mgr; + UNF_CHECK_VALID(0x903, UNF_TRUE, xchg_mgr, return); + if (!v_xchg->big_sfs_buf) + return; + + if ((v_xchg->cmnd_code != NS_GID_PT) && + (v_xchg->cmnd_code != NS_GID_FT) && + (v_xchg->cmnd_code != ELS_ECHO) && + (UNF_SET_ELS_ACC_TYPE(ELS_ECHO) != v_xchg->cmnd_code) && + (v_xchg->cmnd_code != ELS_RSCN) && + (UNF_SET_ELS_ACC_TYPE(ELS_RSCN) != v_xchg->cmnd_code)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "Exchange(0x%p), Command(0x%x) big SFS buf is not NULL.", + v_xchg, v_xchg->cmnd_code); + } + + spin_lock_irqsave(&xchg_mgr->st_big_sfs_pool.big_sfs_pool_lock, flag); + list_del(&v_xchg->big_sfs_buf->entry_big_sfs); + list_add_tail(&v_xchg->big_sfs_buf->entry_big_sfs, + &xchg_mgr->st_big_sfs_pool.list_free_pool); + xchg_mgr->st_big_sfs_pool.free_count++; + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "Free one big SFS buf(0x%p), Count(0x%x), Exchange(0x%p), Command(0x%x).", + v_xchg->big_sfs_buf->vaddr, + xchg_mgr->st_big_sfs_pool.free_count, + v_xchg, v_xchg->cmnd_code); + spin_unlock_irqrestore(&xchg_mgr->st_big_sfs_pool.big_sfs_pool_lock, + flag); +} + +static void unf_free_exchg_mgr_info(struct unf_lport_s *v_lport) +{ + unsigned int i; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flags = 0; + struct 
unf_xchg_mgr_s *xchg_mgr = NULL; + + spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags); + list_for_each_safe(node, next_node, &v_lport->list_xchg_mgr_head) { + list_del(node); + xchg_mgr = list_entry(node, struct unf_xchg_mgr_s, + xchg_mgr_entry); + } + spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags); + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + xchg_mgr = v_lport->p_xchg_mgr[i]; + + if (xchg_mgr) { + unf_free_big_sfs_pool(xchg_mgr); + unf_free_all_rsp_pages(xchg_mgr); + + if (xchg_mgr->sfs_mm_start) { + dma_free_coherent( + &v_lport->low_level_func.dev->dev, + xchg_mgr->sfs_mem_size, + xchg_mgr->sfs_mm_start, + xchg_mgr->sfs_phy_addr); + xchg_mgr->sfs_mm_start = 0; + } + + if (xchg_mgr->fcp_mm_start) { + vfree(xchg_mgr->fcp_mm_start); + xchg_mgr->fcp_mm_start = NULL; + } + + if (xchg_mgr->hot_pool) { + vfree(xchg_mgr->hot_pool); + xchg_mgr->hot_pool = NULL; + } + + vfree(xchg_mgr); + v_lport->p_xchg_mgr[i] = NULL; + } + } +} + +static unsigned int unf_alloc_and_init_xchg_mgr(struct unf_lport_s *v_lport) +{ + struct unf_xchg_mgr_s *xchg_mgr = NULL; + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct unf_xchg_s *xchg_mem = NULL; + void *sfs_mm_start = 0; + dma_addr_t sfs_phy_addr = 0; + unsigned int xchg_sum = 0; + unsigned int sfs_xchg_sum = 0; + unsigned long flags = 0; + unsigned int order = 0; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int slab_num = 0; + unsigned int i = 0; + + UNF_REFERNCE_VAR(order); + /* SFS_EXCH + I/O_EXCH */ + ret = unf_get_xchg_config_sum(v_lport, &xchg_sum); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Port(0x%x) can't get Exchange.", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + /* SFS Exchange Sum */ + sfs_xchg_sum = v_lport->low_level_func.lport_cfg_items.max_sfs_xchg / + UNF_EXCHG_MGR_NUM; + + xchg_sum = xchg_sum / UNF_EXCHG_MGR_NUM; + slab_num = v_lport->low_level_func.support_max_xid_range / + UNF_EXCHG_MGR_NUM; + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + /* Alloc Exchange Manager */ + xchg_mgr = (struct unf_xchg_mgr_s *) + vmalloc(sizeof(struct unf_xchg_mgr_s)); + if (!xchg_mgr) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) allocate Exchange Manager Memory Fail.", + v_lport->port_id); + + goto exit; + } + + /* Init Exchange Manager */ + ret = unf_init_xchg_mgr(xchg_mgr); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_REG_ATT, UNF_MAJOR, + "Port(0x%x) initialization Exchange Manager unsuccessful.", + v_lport->port_id); + + goto free_xchg_mgr; + } + + /* Initialize the Exchange Free Pool resource */ + ret = unf_init_xchg_mgr_free_pool(xchg_mgr); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_REG_ATT, UNF_MAJOR, + "Port(0x%x) initialization Exchange Manager Free Pool unsuccessful.", + v_lport->port_id); + + goto free_xchg_mgr; + } + + /* Allocate memory for Hot Pool and Xchg slab */ + hot_pool = vmalloc(sizeof(struct unf_xchg_hot_pool_s) + + sizeof(struct unf_xchg_s *) * slab_num); + if (!hot_pool) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) allocate Hot Pool Memory Fail.", + v_lport->port_id); + goto free_xchg_mgr; + } + + memset(hot_pool, 0, + sizeof(struct unf_xchg_hot_pool_s) + + sizeof(struct unf_xchg_s *) * slab_num); + xchg_mgr->mem_size += + (unsigned int)(sizeof(struct unf_xchg_hot_pool_s) + + sizeof(struct unf_xchg_s *) * slab_num); + + /* Initialize the Exchange Hot Pool resource */ + ret = unf_init_xchg_hot_pool(v_lport, hot_pool, slab_num); + if 
(ret != RETURN_OK) + goto free_hot_pool; + + hot_pool->base += (unsigned short)(i * slab_num); + /* Allocate the memory of all Xchg (IO/SFS) */ + xchg_mem = vmalloc(sizeof(struct unf_xchg_s) * xchg_sum); + if (!xchg_mem) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) allocate Exchange Memory Fail.", + v_lport->port_id); + goto free_hot_pool; + } + memset(xchg_mem, 0, sizeof(struct unf_xchg_s) * xchg_sum); + xchg_mgr->mem_size += + (unsigned int)(sizeof(struct unf_xchg_s) * xchg_sum); + + xchg_mgr->hot_pool = hot_pool; + xchg_mgr->fcp_mm_start = xchg_mem; + + /* Allocate the memory used by the SFS Xchg + * to carry the ELS/BLS/GS command and response + */ + xchg_mgr->sfs_mem_size = + (unsigned int)(sizeof(union unf_sfs_u) * sfs_xchg_sum); + + /* Apply for the DMA space for sending sfs frames. + * If the value of DMA32 is less than 4 GB, + * cross-4G problems will not occur + */ + order = (unsigned int)get_order(xchg_mgr->sfs_mem_size); + + sfs_mm_start = dma_alloc_coherent( + &v_lport->low_level_func.dev->dev, + xchg_mgr->sfs_mem_size, + &sfs_phy_addr, GFP_KERNEL); + if (!sfs_mm_start) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) Get Free Pagers Fail, Order(%u).", + v_lport->port_id, order); + goto free_xchg_mem; + } + memset(sfs_mm_start, 0, sizeof(union unf_sfs_u) * sfs_xchg_sum); + xchg_mgr->mem_size += xchg_mgr->sfs_mem_size; + xchg_mgr->sfs_mm_start = sfs_mm_start; + xchg_mgr->sfs_phy_addr = sfs_phy_addr; + + /* The Xchg is initialized and mounted to the Free Pool */ + ret = unf_init_xchg(v_lport, xchg_mgr, xchg_sum, sfs_xchg_sum); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_REG_ATT, UNF_MAJOR, + "Port(0x%x) initialization Exchange unsuccessful, Exchange Number(%u), SFS Exchange number(%u).", + v_lport->port_id, xchg_sum, sfs_xchg_sum); + dma_free_coherent(&v_lport->low_level_func.dev->dev, + xchg_mgr->sfs_mem_size, + xchg_mgr->sfs_mm_start, + xchg_mgr->sfs_phy_addr); + xchg_mgr->sfs_mm_start = 0; + goto free_xchg_mem; + } + + /* Apply for the memory used by GID_PT, GID_FT, and RSCN */ + ret = unf_alloc_and_init_big_sfs_pool(v_lport, xchg_mgr); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) allocate big SFS fail", + v_lport->port_id); + + unf_free_all_rsp_pages(xchg_mgr); + dma_free_coherent(&v_lport->low_level_func.dev->dev, + xchg_mgr->sfs_mem_size, + xchg_mgr->sfs_mm_start, + xchg_mgr->sfs_phy_addr); + xchg_mgr->sfs_mm_start = 0; + goto free_xchg_mem; + } + + spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags); + v_lport->p_xchg_mgr[i] = (void *)xchg_mgr; + list_add_tail(&xchg_mgr->xchg_mgr_entry, + &v_lport->list_xchg_mgr_head); + spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) ExchangeMgr:(0x%p),Base:(0x%x).", + v_lport->port_id, v_lport->p_xchg_mgr[i], + hot_pool->base); + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "Port(0x%x) allocate Exchange Manager size(0x%x).", + v_lport->port_id, xchg_mgr->mem_size); + + return RETURN_OK; + +free_xchg_mem: + vfree(xchg_mem); +free_hot_pool: + vfree(hot_pool); +free_xchg_mgr: + vfree(xchg_mgr); +exit: + unf_free_exchg_mgr_info(v_lport); + return UNF_RETURN_ERROR; +} + +void unf_xchg_mgr_destroy(struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x905, UNF_TRUE, v_lport, return); + + unf_free_all_xchg_mgr(v_lport); +} + +unsigned int unf_alloc_xchg_resource(struct unf_lport_s 
*v_lport) +{ + UNF_CHECK_VALID(0x906, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + INIT_LIST_HEAD(&v_lport->list_dirty_xchg_mgr_head); + INIT_LIST_HEAD(&v_lport->list_xchg_mgr_head); + spin_lock_init(&v_lport->xchg_mgr_lock); + + /* LPort Xchg Management Unit Allocation */ + if (unf_alloc_and_init_xchg_mgr(v_lport) != RETURN_OK) + return UNF_RETURN_ERROR; + + return RETURN_OK; +} + +void unf_destroy_dirty_xchg(struct unf_lport_s *v_lport, int v_show_only) +{ + unsigned int dirty_xchg = 0; + struct unf_xchg_mgr_s *exch_mgr = NULL; + unsigned long flags = 0; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + + UNF_CHECK_VALID(0x908, UNF_TRUE, v_lport, return); + + if (v_lport->dirty_flag & UNF_LPORT_DIRTY_FLAG_XCHGMGR_DIRTY) { + spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags); + list_for_each_safe(node, next_node, + &v_lport->list_dirty_xchg_mgr_head) { + exch_mgr = list_entry(node, struct unf_xchg_mgr_s, + xchg_mgr_entry); + spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags); + if (exch_mgr) { + dirty_xchg = + (exch_mgr->free_pool.total_fcp_xchg + + exch_mgr->free_pool.total_sfs_xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) has %u dirty exchange(s)", + v_lport->port_id, dirty_xchg); + + unf_show_all_xchg(v_lport, exch_mgr); + + if (v_show_only == UNF_FALSE) { + /* Delete Dirty Exchange Mgr entry */ + spin_lock_irqsave( + &v_lport->xchg_mgr_lock, + flags); + list_del_init( + &exch_mgr->xchg_mgr_entry); + spin_unlock_irqrestore( + &v_lport->xchg_mgr_lock, + flags); + + /* Free Dirty Exchange Mgr memory */ + unf_free_xchg_mgr_mem(v_lport, + exch_mgr); + } + } + spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags); + } + spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags); + } + + UNF_REFERNCE_VAR(dirty_xchg); +} + +struct unf_xchg_mgr_s *unf_get_xchg_mgr_by_lport(struct unf_lport_s *v_lport, + unsigned int v_idx) +{ + struct unf_xchg_mgr_s *xchg_mgr = NULL; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x909, UNF_TRUE, v_lport, return NULL); + UNF_CHECK_VALID(0x910, UNF_TRUE, v_idx < UNF_EXCHG_MGR_NUM, + return NULL); + + spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags); + xchg_mgr = v_lport->p_xchg_mgr[v_idx]; + spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags); + + return xchg_mgr; +} + +struct unf_xchg_hot_pool_s *unf_get_hot_pool_by_lport( + struct unf_lport_s *v_lport, + unsigned int v_mgr_idx) +{ + struct unf_xchg_mgr_s *xchg_mgr = NULL; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x910, UNF_TRUE, (v_lport), return NULL); + + lport = (struct unf_lport_s *)(v_lport->root_lport); + + UNF_CHECK_VALID(0x910, UNF_TRUE, (lport), return NULL); + + /* Get Xchg Manager */ + xchg_mgr = unf_get_xchg_mgr_by_lport(lport, v_mgr_idx); + if (!xchg_mgr) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "Port(0x%x) Exchange Manager is NULL.", + lport->port_id); + + return NULL; + } + + /* Get Xchg Manager Hot Pool */ + return xchg_mgr->hot_pool; +} + +static inline void unf_hot_pool_slab_set( + struct unf_xchg_hot_pool_s *v_hot_pool, + unsigned short v_slab_index, + struct unf_xchg_s *v_xchg) +{ + UNF_CHECK_VALID(0x911, UNF_TRUE, v_hot_pool, return); + + v_hot_pool->xchg_slab[v_slab_index] = v_xchg; +} + +static inline struct unf_xchg_s *unf_get_xchg_by_xchg_tag( + struct unf_xchg_hot_pool_s *v_hot_pool, + unsigned short v_slab_index) +{ + UNF_CHECK_VALID(0x912, UNF_TRUE, v_hot_pool, return NULL); + + return v_hot_pool->xchg_slab[v_slab_index]; +} + +static void *unf_lookup_xchg_by_tag(void 
*v_lport, + unsigned short v_hot_pool_tag) +{ + struct unf_lport_s *lport = NULL; + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned long flags = 0; + unsigned int exchg_mgr_idx = 0; + struct unf_xchg_mgr_s *xchg_mgr = NULL; + + UNF_CHECK_VALID(0x913, UNF_TRUE, v_lport, return NULL); + + /* In the case of NPIV, v_pstLport is the Vport pointer, + * the share uses the ExchMgr of RootLport + */ + lport = ((struct unf_lport_s *)v_lport)->root_lport; + UNF_CHECK_VALID(0x914, UNF_TRUE, lport, return NULL); + + exchg_mgr_idx = (v_hot_pool_tag * UNF_EXCHG_MGR_NUM) / + lport->low_level_func.support_max_xid_range; + if (unlikely(exchg_mgr_idx >= UNF_EXCHG_MGR_NUM)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) Get ExchgMgr %u err", + lport->port_id, exchg_mgr_idx); + + return NULL; + } + + xchg_mgr = lport->p_xchg_mgr[exchg_mgr_idx]; + + if (unlikely(!xchg_mgr)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) ExchgMgr %u is null", + lport->port_id, exchg_mgr_idx); + + return NULL; + } + + hot_pool = xchg_mgr->hot_pool; + + if (unlikely(!hot_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "Port(0x%x) Hot Pool is NULL.", lport->port_id); + + return NULL; + } + + if (unlikely(v_hot_pool_tag >= + (hot_pool->slab_total_sum + hot_pool->base))) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]LPort(0x%x) can't Input Tag(0x%x), Max(0x%x).", + lport->port_id, v_hot_pool_tag, + (hot_pool->slab_total_sum + hot_pool->base)); + + return NULL; + } + + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags); + xchg = unf_get_xchg_by_xchg_tag(hot_pool, + v_hot_pool_tag - hot_pool->base); + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags); + + return (void *)xchg; +} + +static void *unf_find_xchg_by_oxid(void *v_lport, unsigned short v_oxid, + unsigned int v_oid) +{ + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct unf_xchg_s *xchg = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + struct unf_lport_s *lport = NULL; + unsigned long flags = 0; + unsigned long xchg_flags = 0; + unsigned int i = 0; + + UNF_CHECK_VALID(0x915, UNF_TRUE, (v_lport), return NULL); + + /* In the case of NPIV, the v_lport is the Vport pointer, + * and the share uses the ExchMgr of the RootLport + */ + lport = ((struct unf_lport_s *)v_lport)->root_lport; + UNF_CHECK_VALID(0x916, UNF_TRUE, (lport), return NULL); + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + hot_pool = unf_get_hot_pool_by_lport(lport, i); + if (unlikely(!hot_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, + UNF_LOG_IO_ATT, UNF_MAJOR, + "Port(0x%x) MgrIdex %u Hot Pool is NULL.", + lport->port_id, i); + continue; + } + + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags); + + /* 1. Traverse sfs_busy list */ + list_for_each_safe(node, next_node, &hot_pool->sfs_busylist) { + xchg = list_entry(node, struct unf_xchg_s, + list_xchg_entry); + spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flags); + if (UNF_CHECK_OXID_MATCHED(v_oxid, v_oid, xchg)) { + atomic_inc(&xchg->ref_cnt); + spin_unlock_irqrestore(&xchg->xchg_state_lock, + xchg_flags); + spin_unlock_irqrestore( + &hot_pool->xchg_hot_pool_lock, flags); + return xchg; + } + spin_unlock_irqrestore(&xchg->xchg_state_lock, + xchg_flags); + } + + /* 2. 
Traverse INI_Busy List */ + list_for_each_safe(node, next_node, &hot_pool->ini_busylist) { + xchg = list_entry(node, struct unf_xchg_s, + list_xchg_entry); + spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flags); + if (UNF_CHECK_OXID_MATCHED(v_oxid, v_oid, xchg)) { + atomic_inc(&xchg->ref_cnt); + spin_unlock_irqrestore(&xchg->xchg_state_lock, + xchg_flags); + spin_unlock_irqrestore( + &hot_pool->xchg_hot_pool_lock, flags); + return xchg; + } + spin_unlock_irqrestore(&xchg->xchg_state_lock, + xchg_flags); + } + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags); + } + + return NULL; +} + +static inline int unf_check_xchg_matched(struct unf_xchg_s *xchg, + unsigned long long v_command_sn, + unsigned int v_world_id) +{ + int matched = 0; + + matched = (v_command_sn == xchg->cmnd_sn); + if (matched && (atomic_read(&xchg->ref_cnt) > 0)) + return UNF_TRUE; + else + return UNF_FALSE; +} + +static void *unf_lookup_xchg_by_cmnd_sn(void *v_lport, + unsigned long long v_command_sn, + unsigned int v_world_id) +{ + struct unf_lport_s *lport = NULL; + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned long flags = 0; + unsigned int i; + + UNF_CHECK_VALID(0x919, UNF_TRUE, v_lport, return NULL); + + /* In NPIV, v_lport is a Vport pointer, and idle resources are + * shared by ExchMgr of RootLport. + * However, busy resources are mounted on each vport. + * Therefore, vport needs to be used. + */ + lport = (struct unf_lport_s *)v_lport; + UNF_CHECK_VALID(0x920, UNF_TRUE, lport, return NULL); + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + hot_pool = unf_get_hot_pool_by_lport(lport, i); + if (unlikely(!hot_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) hot pool is NULL", + lport->port_id); + + continue; + } + + /* from busy_list */ + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags); + list_for_each_safe(node, next_node, &hot_pool->ini_busylist) { + xchg = list_entry(node, struct unf_xchg_s, + list_xchg_entry); + if (unf_check_xchg_matched(xchg, v_command_sn, + v_world_id)) { + spin_unlock_irqrestore( + &hot_pool->xchg_hot_pool_lock, flags); + + return xchg; + } + } + + /* vport: from destroy_list */ + if (lport != lport->root_lport) { + list_for_each_safe(node, next_node, + &hot_pool->list_destroy_xchg) { + xchg = list_entry(node, struct unf_xchg_s, + list_xchg_entry); + if (unf_check_xchg_matched(xchg, v_command_sn, + v_world_id)) { + spin_unlock_irqrestore( + &hot_pool->xchg_hot_pool_lock, + flags); + + UNF_TRACE(UNF_EVTLOG_IO_INFO, + UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]Port(0x%x) lookup exchange from destroy list", + lport->port_id); + + return xchg; + } + } + } + + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags); + } + + return NULL; +} + +static inline unsigned int unf_alloc_hot_pool_slab( + struct unf_xchg_hot_pool_s *v_hot_pool, + struct unf_xchg_s *v_xchg, + unsigned short v_rx_id) +{ + unsigned short slab_index = 0; + + UNF_CHECK_VALID(0x921, UNF_TRUE, v_hot_pool, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x922, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + /* Check whether the hotpool tag is in the specified range sirt. + * If yes, set up the management relationship. If no, + * handle the problem according to the normal IO. + * If the sirt digitmap is used but the tag is occupied, + * it indicates that the I/O is discarded. 
+ */ + + v_hot_pool->slab_next_index = + (unsigned short)v_hot_pool->slab_next_index; + + slab_index = v_hot_pool->slab_next_index; + while (unf_get_xchg_by_xchg_tag(v_hot_pool, slab_index)) { + slab_index++; + slab_index = slab_index % v_hot_pool->slab_total_sum; + + /* Rewind occurs */ + if (slab_index == v_hot_pool->slab_next_index) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "There is No Slab At Hot Pool(0x%p) for xchg(0x%p).", + v_hot_pool, v_xchg); + + return UNF_RETURN_ERROR; + } + } + + unf_hot_pool_slab_set(v_hot_pool, slab_index, v_xchg); + v_xchg->hot_pool_tag = slab_index + v_hot_pool->base; + slab_index++; + v_hot_pool->slab_next_index = + slab_index % v_hot_pool->slab_total_sum; + return RETURN_OK; +} + +struct unf_esgl_page_s *unf_get_one_free_esgl_page(struct unf_lport_s *v_lport, + struct unf_xchg_s *v_xchg) +{ + struct unf_lport_s *lport = NULL; + struct unf_esgl_s *esgl = NULL; + struct unf_xchg_s *xchg = NULL; + struct list_head *list_head = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x923, UNF_TRUE, v_lport, return NULL); + UNF_CHECK_VALID(0x924, UNF_TRUE, v_xchg, return NULL); + + lport = v_lport; + xchg = v_xchg; + + /* Obtain a new Esgl from the EsglPool and + * add it to the list_esgls of the Xchg + */ + spin_lock_irqsave(&lport->esgl_pool.esgl_pool_lock, flag); + if (!list_empty(&lport->esgl_pool.list_esgl_pool)) { + list_head = (&lport->esgl_pool.list_esgl_pool)->next; + list_del(list_head); + lport->esgl_pool.esgl_pool_count--; + list_add_tail(list_head, &xchg->list_esgls); + + esgl = list_entry(list_head, struct unf_esgl_s, entry_esgl); + atomic_inc(&xchg->esgl_cnt); + spin_unlock_irqrestore(&lport->esgl_pool.esgl_pool_lock, flag); + } else { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) esgl pool is empty", + lport->nport_id); + + spin_unlock_irqrestore(&lport->esgl_pool.esgl_pool_lock, flag); + return NULL; + } + + return &esgl->page; +} + +void unf_release_esgls(struct unf_xchg_s *v_xchg) +{ + struct unf_lport_s *lport = NULL; + struct list_head *list = NULL; + struct list_head *list_tmp = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x925, UNF_TRUE, v_xchg, return); + UNF_CHECK_VALID(0x926, UNF_TRUE, v_xchg->lport, return); + + if (atomic_read(&v_xchg->esgl_cnt) <= 0) + return; + + /* In the case of NPIV, the Vport pointer is saved in v_pstExch, + * and the EsglPool of RootLport is shared. 
+ */ + lport = (v_xchg->lport)->root_lport; + UNF_CHECK_VALID(0x927, UNF_TRUE, (lport), return); + + spin_lock_irqsave(&lport->esgl_pool.esgl_pool_lock, flag); + if (!list_empty(&v_xchg->list_esgls)) { + list_for_each_safe(list, list_tmp, &v_xchg->list_esgls) { + list_del(list); + list_add_tail(list, &lport->esgl_pool.list_esgl_pool); + lport->esgl_pool.esgl_pool_count++; + atomic_dec(&v_xchg->esgl_cnt); + } + } + spin_unlock_irqrestore(&lport->esgl_pool.esgl_pool_lock, flag); +} + +static void unf_init_xchg_attribute(struct unf_xchg_s *v_xchg) +{ + unsigned long flags = 0; + + UNF_CHECK_VALID(0x973, UNF_TRUE, (v_xchg), return); + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flags); + v_xchg->xchg_mgr = NULL; + v_xchg->free_pool = NULL; + v_xchg->hot_pool = NULL; + v_xchg->lport = NULL; + v_xchg->rport = NULL; + v_xchg->disc_rport = NULL; + v_xchg->io_state = UNF_IO_STATE_NEW; + v_xchg->io_send_stage = TGT_IO_SEND_STAGE_NONE; + v_xchg->io_send_result = TGT_IO_SEND_RESULT_INVALID; + v_xchg->io_send_abort = UNF_FALSE; + v_xchg->io_abort_result = UNF_FALSE; + v_xchg->abts_state = 0; + v_xchg->ox_id = INVALID_VALUE16; + v_xchg->abort_oxid = INVALID_VALUE16; + v_xchg->rx_id = INVALID_VALUE16; + v_xchg->sid = INVALID_VALUE32; + v_xchg->did = INVALID_VALUE32; + v_xchg->oid = INVALID_VALUE32; + v_xchg->disc_port_id = INVALID_VALUE32; + v_xchg->seq_id = INVALID_VALUE8; + v_xchg->cmnd_code = INVALID_VALUE32; + v_xchg->cmnd_sn = INVALID_VALUE64; + v_xchg->data_len = 0; + v_xchg->resid_len = 0; + v_xchg->data_direction = DMA_NONE; + v_xchg->hot_pool_tag = INVALID_VALUE16; + v_xchg->big_sfs_buf = NULL; + v_xchg->may_consume_res_cnt = 0; + v_xchg->fact_consume_res_cnt = 0; + v_xchg->io_front_jif = INVALID_VALUE64; + v_xchg->ob_callback_sts = UNF_IO_SUCCESS; + v_xchg->start_jif = 0; + v_xchg->rport_bind_jifs = INVALID_VALUE64; + v_xchg->scsi_id = INVALID_VALUE32; + v_xchg->world_id = INVALID_VALUE32; + + memset(&v_xchg->seq, 0, sizeof(struct unf_seq_s)); + memset(&v_xchg->fcp_cmnd, 0, sizeof(struct unf_fcp_cmnd_s)); + memset(&v_xchg->scsi_cmnd_info, 0, sizeof(struct unf_scsi_cmd_info_s)); + memset(&v_xchg->abts_rsps, 0, sizeof(struct unf_abts_rsps_s)); + memset(&v_xchg->dif_info, 0, sizeof(struct dif_info_s)); + memset(v_xchg->private, 0, + (PKG_MAX_PRIVATE_DATA_SIZE * sizeof(unsigned int))); + v_xchg->echo_info.echo_result = UNF_ELS_ECHO_RESULT_OK; + v_xchg->echo_info.response_time = 0; + + if (v_xchg->xchg_type == UNF_XCHG_TYPE_INI) { + if (v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu) + memset(v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu, + 0, sizeof(struct unf_fcprsp_iu_s)); + } else if (v_xchg->xchg_type == UNF_XCHG_TYPE_SFS) { + if (v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) { + memset(v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr, + 0, sizeof(union unf_sfs_u)); + v_xchg->fcp_sfs_union.sfs_entry.cur_offset = 0; + } + } else { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "Exchange Type(0x%x) SFS Union uninited.", + v_xchg->xchg_type); + } + v_xchg->xchg_type = UNF_XCHG_TYPE_INVALID; + v_xchg->pfn_ob_callback = NULL; + v_xchg->pfn_callback = NULL; + v_xchg->pfn_free_xchg = NULL; + + atomic_set(&v_xchg->ref_cnt, 0); + atomic_set(&v_xchg->esgl_cnt, 0); + atomic_set(&v_xchg->delay_flag, 0); + + if (delayed_work_pending(&v_xchg->timeout_work)) + UNF_DEL_XCHG_TIMER_SAFE(v_xchg); + + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags); +} + +static void unf_add_back_to_fcp_list( + struct unf_xchg_free_pool_s *v_free_pool, + struct unf_xchg_s *v_xchg) +{ + unsigned long 
flags = 0; + + UNF_CHECK_VALID(0x928, UNF_TRUE, v_free_pool, return); + UNF_CHECK_VALID(0x929, UNF_TRUE, v_xchg, return); + + unf_init_xchg_attribute(v_xchg); + + /* The released I/O resources are added to + * the queue tail to facilitate fault locating + */ + spin_lock_irqsave(&v_free_pool->xchg_free_pool_lock, flags); + list_add_tail(&v_xchg->list_xchg_entry, + &v_free_pool->list_free_xchg_list); + v_free_pool->total_fcp_xchg++; + spin_unlock_irqrestore(&v_free_pool->xchg_free_pool_lock, flags); +} + +static void unf_check_xchg_mgr_status(struct unf_xchg_mgr_s *v_xchg_mgr) +{ + unsigned long flags = 0; + unsigned int total_xchg = 0; + unsigned int total_xchg_sum = 0; + + UNF_CHECK_VALID(0x930, UNF_TRUE, v_xchg_mgr, return); + + spin_lock_irqsave(&v_xchg_mgr->free_pool.xchg_free_pool_lock, flags); + + total_xchg = v_xchg_mgr->free_pool.total_fcp_xchg + + v_xchg_mgr->free_pool.total_sfs_xchg; + total_xchg_sum = v_xchg_mgr->free_pool.fcp_xchg_sum + + v_xchg_mgr->free_pool.sfs_xchg_sum; + + if ((v_xchg_mgr->free_pool.xchg_mgr_completion) && + (total_xchg == total_xchg_sum)) { + complete(v_xchg_mgr->free_pool.xchg_mgr_completion); + } + spin_unlock_irqrestore(&v_xchg_mgr->free_pool.xchg_free_pool_lock, + flags); +} + +static void unf_free_fcp_xchg(struct unf_xchg_s *v_xchg) +{ + struct unf_xchg_free_pool_s *free_pool = NULL; + struct unf_xchg_mgr_s *xchg_mgr = NULL; + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + + UNF_CHECK_VALID(0x932, UNF_TRUE, v_xchg, return); + + /* Releasing a Specified INI I/O and Invoking the scsi_done Process */ + unf_done_ini_xchg(v_xchg); + free_pool = v_xchg->free_pool; + xchg_mgr = v_xchg->xchg_mgr; + lport = v_xchg->lport; + rport = v_xchg->rport; + + atomic_dec(&rport->pending_io_cnt); + /* Release the Esgls in the Xchg structure and + * return it to the EsglPool of the Lport + */ + unf_release_esgls(v_xchg); + + /* Mount I/O resources to the FCP Free linked list */ + unf_add_back_to_fcp_list(free_pool, v_xchg); + + /* The Xchg is released synchronously and then forcibly released to + * prevent the Xchg from accessing the Xchg in the normal I/O process + */ + if (unlikely(lport->b_port_removing == UNF_TRUE)) + unf_check_xchg_mgr_status(xchg_mgr); +} + +static void unf_fc_abort_timeout_cmnd(struct unf_lport_s *v_lport, + struct unf_xchg_s *v_xchg) +{ + struct unf_lport_s *lport = v_lport; + struct unf_xchg_s *xchg = v_xchg; + struct unf_scsi_cmd_s scsi_cmnd = { 0 }; + unsigned long flag = 0; + unsigned int timeout_value = 2000; + unsigned int return_value = 0; + struct unf_rport_scsi_id_image_s *scsi_image_table = NULL; + + UNF_CHECK_VALID(0x936, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x937, UNF_TRUE, v_xchg, return); + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + if (v_xchg->io_state & INI_IO_STATE_UPABORT) { + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "LPort(0x%x) xchange(0x%p) OX_ID(0x%x), RX_ID(0x%x) Cmdsn(0x%lx) has been aborted.", + lport->port_id, v_xchg, v_xchg->ox_id, + v_xchg->rx_id, (unsigned long)v_xchg->cmnd_sn); + return; + } + v_xchg->io_state |= INI_IO_STATE_UPABORT; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_NORMAL, UNF_KEVENT, + "LPort(0x%x) exchg(0x%p) OX_ID(0x%x) RX_ID(0x%x) Cmdsn(0x%lx) timeout abort it", + lport->port_id, v_xchg, v_xchg->ox_id, + v_xchg->rx_id, (unsigned long)v_xchg->cmnd_sn); + + lport->xchg_mgr_temp.pfn_unf_xchg_add_timer( + (void *)v_xchg, + 
(unsigned long)UNF_WAIT_ABTS_RSP_TIMEOUT, + UNF_TIMER_TYPE_INI_ABTS); + + sema_init(&v_xchg->task_sema, 0); + + scsi_cmnd.scsi_id = xchg->scsi_cmnd_info.scsi_id; + scsi_cmnd.upper_cmnd = xchg->scsi_cmnd_info.scsi_cmnd; + scsi_cmnd.pfn_done = xchg->scsi_cmnd_info.pfn_done; + scsi_image_table = &lport->rport_scsi_table; + + if (unf_send_abts(lport, v_xchg) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "LPort(0x%x) failed to send ABTS. Exchange OX_ID(0x%x), RX_ID(0x%x).", + lport->port_id, v_xchg->ox_id, + v_xchg->rx_id); + lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)v_xchg); + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + v_xchg->io_state &= ~INI_IO_STATE_UPABORT; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + /* The message fails to be sent. + * It is released internally and does not + * need to be released externally. + */ + return; + } + + if (down_timeout(&v_xchg->task_sema, + (long long)msecs_to_jiffies(timeout_value))) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) recv abts marker timeout, Exch(0x%p) OX_ID(0x%x) RX_ID(0x%x)", + lport->port_id, v_xchg, + v_xchg->ox_id, v_xchg->rx_id); + lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)v_xchg); + + /* Cancel the INI_IO_STATE_UPABORT flag + * and process the IO in TMF + */ + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + v_xchg->io_state &= ~INI_IO_STATE_UPABORT; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + return; + } + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + if ((v_xchg->ucode_abts_state == UNF_IO_SUCCESS) || + (v_xchg->scsi_cmnd_info.result == UNF_IO_ABORT_PORT_REMOVING)) { + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]Port(0x%x) Send ABTS succeeded and recv marker Exch(0x%p) OX_ID(0x%x) RX_ID(0x%x) marker status(0x%x)", + lport->port_id, v_xchg, + v_xchg->ox_id, v_xchg->rx_id, + v_xchg->ucode_abts_state); + return_value = DID_BUS_BUSY; + UNF_IO_RESULT_CNT(scsi_image_table, scsi_cmnd.scsi_id, + return_value); + unf_complete_cmnd(&scsi_cmnd, DID_BUS_BUSY << 16); + return; + } + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)v_xchg); + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + v_xchg->io_state &= ~INI_IO_STATE_UPABORT; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Port(0x%x) send ABTS failed.
Exch(0x%p) hot_tag(0x%x) ret(0x%x) v_xchg->io_state (0x%x)", + lport->port_id, v_xchg, v_xchg->hot_pool_tag, + v_xchg->scsi_cmnd_info.result, v_xchg->io_state); +} + +static void unf_fc_ini_send_abts_timeout(struct unf_lport_s *lport, + struct unf_rport_s *rport, + struct unf_xchg_s *xchg) +{ + if (xchg->rport_bind_jifs == rport->rport_alloc_jifs && + xchg->rport_bind_jifs != INVALID_VALUE64) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) RPort(0x%x) Exch(0x%p) first time to send abts timeout, retry again OX_ID(0x%x) RX_ID(0x%x) state(0x%x)", + lport->port_id, rport->nport_id, + xchg, xchg->ox_id, xchg->rx_id, xchg->io_state); + + lport->xchg_mgr_temp.pfn_unf_xchg_add_timer( + (void *)xchg, + (unsigned long)UNF_WAIT_ABTS_RSP_TIMEOUT, + UNF_TIMER_TYPE_INI_ABTS); + + if (unf_send_abts(lport, xchg) != RETURN_OK) { + lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer( + (void *)xchg); + + unf_abts_timeout_recovery_default(rport, xchg); + + unf_cm_free_xchg(lport, xchg); + } + } else { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) RPort(0x%x) Exch(0x%p) rport is invalid, exchg rport jiff(0x%llx 0x%llx), free exchange OX_ID(0x%x) RX_ID(0x%x) state(0x%x)", + lport->port_id, rport->nport_id, xchg, + xchg->rport_bind_jifs, rport->rport_alloc_jifs, + xchg->ox_id, xchg->rx_id, xchg->io_state); + + unf_cm_free_xchg(lport, xchg); + } +} + +static void unf_fc_ini_io_rec_wait_timeout(struct unf_lport_s *lport, + struct unf_rport_s *rport, + struct unf_xchg_s *xchg) +{ + unsigned long io_time_out = 0; + + if (xchg->rport_bind_jifs == rport->rport_alloc_jifs) { + unf_send_rec(lport, rport, xchg); + if (xchg->scsi_cmnd_info.abort_timeout > 0) { + io_time_out = + (xchg->scsi_cmnd_info.abort_timeout > + UNF_REC_TOV) ? + (xchg->scsi_cmnd_info.abort_timeout - + UNF_REC_TOV) : 0; + + if (io_time_out > 0) { + lport->xchg_mgr_temp.pfn_unf_xchg_add_timer( + (void *)xchg, + io_time_out, + UNF_TIMER_TYPE_REQ_IO); + } else { + unf_fc_abort_timeout_cmnd(lport, xchg); + } + } + } else { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) RPort(0x%x) Exch(0x%p) Rec timeout exchange OX_ID(0x%x) RX_ID(0x%x) state(0x%x), bindjifs(0x%llx) not equal to Rport alloc jifs(0x%llx)", + lport->port_id, rport->nport_id, + xchg, xchg->ox_id, xchg->rx_id, + xchg->io_state, xchg->rport_bind_jifs, + rport->rport_alloc_jifs); + } +} + +static void unf_fc_ini_io_xchg_timeout(struct work_struct *v_work) +{ + struct unf_xchg_s *xchg = NULL; + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + unsigned long flags = 0; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int port_valid_flag = 0; + + UNF_REFERNCE_VAR(ret); + + xchg = container_of(v_work, struct unf_xchg_s, timeout_work.work); + UNF_CHECK_VALID(0x939, UNF_TRUE, xchg, return); + + ret = unf_xchg_ref_inc(xchg, INI_IO_TIMEOUT); + UNF_CHECK_VALID(0x940, UNF_TRUE, ret == RETURN_OK, return); + + lport = xchg->lport; + rport = xchg->rport; + + port_valid_flag = !lport || !rport; + if (port_valid_flag) { + unf_xchg_ref_dec(xchg, INI_IO_TIMEOUT); + unf_xchg_ref_dec(xchg, INI_IO_TIMEOUT); + + return; + } + + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + + /* 1.
for Send RRQ failed Timer timeout */ + if (INI_IO_STATE_RRQSEND_ERR & xchg->io_state) { + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[info]LPort(0x%x) RPort(0x%x) Exch(0x%p) had wait enough time for RRQ send failed OX_ID(0x%x) RX_ID(0x%x) state(0x%x)", + lport->port_id, rport->nport_id, + xchg, xchg->ox_id, xchg->rx_id, xchg->io_state); + + unf_cm_free_xchg(lport, xchg); + } + /* Second ABTS timeout and enter LOGO process */ + else if ((INI_IO_STATE_ABORT_TIMEOUT & xchg->io_state) && + (!(ABTS_RESPONSE_RECEIVED & xchg->abts_state))) { + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) RPort(0x%x) Exch(0x%p) had wait enough time for second abts send OX_ID(0x%x) RX_ID(0x%x) state(0x%x)", + lport->port_id, rport->nport_id, + xchg, xchg->ox_id, xchg->rx_id, + xchg->io_state); + + unf_abts_timeout_recovery_default(rport, xchg); + + unf_cm_free_xchg(lport, xchg); + } + /* First time to send ABTS, timeout and retry to send ABTS again */ + else if ((xchg->io_state & INI_IO_STATE_UPABORT) && + (!(xchg->abts_state & ABTS_RESPONSE_RECEIVED))) { + xchg->io_state |= INI_IO_STATE_ABORT_TIMEOUT; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + unf_fc_ini_send_abts_timeout(lport, rport, xchg); + } + /* 3. IO_DONE */ + else if ((xchg->io_state & INI_IO_STATE_DONE) && + (xchg->abts_state & ABTS_RESPONSE_RECEIVED)) { + /* + * for IO_DONE: + * 1. INI ABTS first timer time out + * 2. INI RCVD ABTS Response + * 3. Normal case for I/O Done + */ + /* Send ABTS & RCVD RSP & no timeout */ + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + /* Send RRQ */ + if (unf_send_rrq(lport, rport, xchg) == RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, + UNF_MAJOR, + "[info]LPort(0x%x) send RRQ succeed to RPort(0x%x) Exch(0x%p) OX_ID(0x%x) RX_ID(0x%x) state(0x%x)", + lport->port_id, rport->nport_id, xchg, + xchg->ox_id, xchg->rx_id, xchg->io_state); + } else { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, + UNF_WARN, + "[warn]LPort(0x%x) can't send RRQ to RPort(0x%x) Exch(0x%p) OX_ID(0x%x) RX_ID(0x%x) state(0x%x)", + lport->port_id, rport->nport_id, xchg, + xchg->ox_id, xchg->rx_id, xchg->io_state); + + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + xchg->io_state |= INI_IO_STATE_RRQSEND_ERR; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + lport->xchg_mgr_temp.pfn_unf_xchg_add_timer( + (void *)xchg, + (unsigned long)UNF_WRITE_RRQ_SENDERR_INTERVAL, + UNF_TIMER_TYPE_INI_IO); + } + } else if (xchg->io_state & INI_IO_STATE_REC_TIMEOUT_WAIT) { + xchg->io_state &= ~INI_IO_STATE_REC_TIMEOUT_WAIT; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + unf_fc_ini_io_rec_wait_timeout(lport, rport, xchg); + } else { + /* 4. 
I/O Timer Timeout */ + /* vmware */ + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + unf_fc_abort_timeout_cmnd(lport, xchg); + } + + unf_xchg_ref_dec(xchg, INI_IO_TIMEOUT); + unf_xchg_ref_dec(xchg, INI_IO_TIMEOUT); + + UNF_REFERNCE_VAR(ret); +} + +static inline struct unf_xchg_s *unf_alloc_io_xchg( + struct unf_lport_s *v_lport, + struct unf_xchg_mgr_s *v_xchg_mgr, + unsigned int v_xchg_type, + unsigned short v_rx_id) +{ + struct unf_xchg_s *xchg = NULL; + struct list_head *list_node = NULL; + struct unf_xchg_free_pool_s *free_pool = NULL; + struct unf_xchg_hot_pool_s *hot_pool = NULL; + unsigned long flags = 0; + static atomic64_t s_exhg_id; + + void (*unf_fc_io_xchg_timeout)(struct work_struct *v_work) = NULL; + + UNF_CHECK_VALID(0x941, UNF_TRUE, v_xchg_mgr, return NULL); + UNF_CHECK_VALID(0x942, UNF_TRUE, v_lport, return NULL); + + free_pool = &v_xchg_mgr->free_pool; + hot_pool = v_xchg_mgr->hot_pool; + UNF_CHECK_VALID(0x943, UNF_TRUE, free_pool, return NULL); + UNF_CHECK_VALID(0x944, UNF_TRUE, hot_pool, return NULL); + + /* 1. Free Pool */ + spin_lock_irqsave(&free_pool->xchg_free_pool_lock, flags); + if (unlikely(list_empty(&free_pool->list_free_xchg_list))) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_INFO, + "Port(0x%x) have no Exchange anymore.", + v_lport->port_id); + + spin_unlock_irqrestore(&free_pool->xchg_free_pool_lock, flags); + + return NULL; + } + + /* Select an idle node from free pool */ + list_node = (&free_pool->list_free_xchg_list)->next; + list_del(list_node); + free_pool->total_fcp_xchg--; + spin_unlock_irqrestore(&free_pool->xchg_free_pool_lock, flags); + + xchg = list_entry(list_node, struct unf_xchg_s, list_xchg_entry); + + /* + * Hot Pool: + * When xchg is mounted to Hot Pool, the mount mode and release mode + * of Xchg must be specified and stored in the sfs linked list. + */ + flags = 0; + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags); + if (unf_alloc_hot_pool_slab(hot_pool, xchg, v_rx_id) != RETURN_OK) { + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags); + + unf_add_back_to_fcp_list(free_pool, xchg); + if (unlikely(v_lport->b_port_removing == UNF_TRUE)) + unf_check_xchg_mgr_status(v_xchg_mgr); + + return NULL; + } + + list_add_tail(&xchg->list_xchg_entry, &hot_pool->ini_busylist); + unf_fc_io_xchg_timeout = unf_fc_ini_io_xchg_timeout; + + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags); + + /* 3. 
Exchange State */ + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + xchg->start_jif = atomic64_inc_return(&s_exhg_id); + xchg->xchg_mgr = v_xchg_mgr; + xchg->free_pool = free_pool; + xchg->hot_pool = hot_pool; + xchg->lport = v_lport; + xchg->xchg_type = v_xchg_type; + xchg->pfn_free_xchg = unf_free_fcp_xchg; + xchg->io_state = UNF_IO_STATE_NEW; + xchg->io_send_stage = TGT_IO_SEND_STAGE_NONE; + xchg->io_send_result = TGT_IO_SEND_RESULT_INVALID; + xchg->io_send_abort = UNF_FALSE; + xchg->io_abort_result = UNF_FALSE; + xchg->ox_id = INVALID_VALUE16; + xchg->abort_oxid = INVALID_VALUE16; + xchg->rx_id = INVALID_VALUE16; + xchg->sid = INVALID_VALUE32; + xchg->did = INVALID_VALUE32; + xchg->oid = INVALID_VALUE32; + xchg->seq_id = INVALID_VALUE8; + xchg->cmnd_code = INVALID_VALUE32; + xchg->data_len = 0; + xchg->resid_len = 0; + xchg->data_direction = DMA_NONE; + xchg->may_consume_res_cnt = 0; + xchg->fact_consume_res_cnt = 0; + xchg->io_front_jif = 0; + xchg->tmf_state = 0; + xchg->ucode_abts_state = INVALID_VALUE32; + xchg->abts_state = 0; + xchg->rport_bind_jifs = INVALID_VALUE64; + xchg->scsi_id = INVALID_VALUE32; + xchg->world_id = INVALID_VALUE32; + + memset(&xchg->dif_control, 0, sizeof(struct unf_dif_control_info_s)); + memset(&xchg->req_sgl_info, 0, sizeof(struct unf_req_sgl_info_s)); + memset(&xchg->dif_sgl_info, 0, sizeof(struct unf_req_sgl_info_s)); + memset(&xchg->abts_rsps, 0, sizeof(struct unf_abts_rsps_s)); + xchg->scsi_cmnd_info.result = 0; + + xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] = + (unsigned int)atomic64_inc_return(&v_lport->exchg_index); + if (xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] == 0) + xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] = + (unsigned int)atomic64_inc_return(&v_lport->exchg_index); + + atomic_set(&xchg->ref_cnt, 0); + atomic_set(&xchg->delay_flag, 0); + + if (delayed_work_pending(&xchg->timeout_work)) + UNF_DEL_XCHG_TIMER_SAFE(xchg); + + INIT_DELAYED_WORK(&xchg->timeout_work, unf_fc_io_xchg_timeout); + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + return xchg; +} + +static void unf_add_back_to_sfs_list( + struct unf_xchg_free_pool_s *v_free_pool, + struct unf_xchg_s *v_xchg) +{ + unsigned long flags = 0; + + UNF_CHECK_VALID(0x945, UNF_TRUE, v_free_pool, return); + UNF_CHECK_VALID(0x946, UNF_TRUE, v_xchg, return); + + unf_init_xchg_attribute(v_xchg); + + spin_lock_irqsave(&v_free_pool->xchg_free_pool_lock, flags); + + list_add_tail(&v_xchg->list_xchg_entry, + &v_free_pool->list_sfs_xchg_list); + v_free_pool->total_sfs_xchg++; + spin_unlock_irqrestore(&v_free_pool->xchg_free_pool_lock, flags); +} + +static void unf_free_sfs_xchg(struct unf_xchg_s *v_xchg) +{ + struct unf_xchg_free_pool_s *free_pool = NULL; + struct unf_xchg_mgr_s *xchg_mgr = NULL; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x947, UNF_TRUE, v_xchg, return); + + free_pool = v_xchg->free_pool; + lport = v_xchg->lport; + xchg_mgr = v_xchg->xchg_mgr; + + /* The memory is applied for when the GID_PT/GID_FT is sent. + * If no response is received, the GID_PT/GID_FT + * needs to be forcibly released. 
+ */ + + unf_free_one_big_sfs(v_xchg); + + unf_add_back_to_sfs_list(free_pool, v_xchg); + + if (unlikely(lport->b_port_removing == UNF_TRUE)) + unf_check_xchg_mgr_status(xchg_mgr); +} + +static void unf_fc_xchg_add_timer(void *v_xchg, + unsigned long v_time_ms, + enum unf_timer_type_e v_en_time_type) +{ + unsigned long flag = 0; + struct unf_xchg_s *xchg = NULL; + unsigned long time_ms = v_time_ms; + struct unf_lport_s *lport; + + UNF_CHECK_VALID(0x948, UNF_TRUE, v_xchg, return); + xchg = (struct unf_xchg_s *)v_xchg; + lport = xchg->lport; + UNF_CHECK_VALID(0x948, UNF_TRUE, lport, return); + + /* update timeout */ + switch (v_en_time_type) { + case UNF_TIMER_TYPE_INI_RRQ: + time_ms = time_ms - UNF_INI_RRQ_REDUNDANT_TIME; + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, + UNF_INFO, "INI RRQ Timer set."); + break; + + case UNF_TIMER_TYPE_SFS: + time_ms = time_ms + UNF_INI_ELS_REDUNDANT_TIME; + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, + UNF_INFO, "INI ELS Timer set."); + break; + default: + break; + } + + /* The xchg of the timer must be valid. + * If the reference count of xchg is 0, + * the timer must not be added + */ + if (atomic_read(&xchg->ref_cnt) <= 0) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_KEVENT, + "[warn]Abnormal Exchange(0x%p), Reference count(0x%x), Can't add timer.", + xchg, atomic_read(&xchg->ref_cnt)); + return; + } + + /* Delay Work: Hold for timer */ + spin_lock_irqsave(&xchg->xchg_state_lock, flag); + if (queue_delayed_work(lport->xchg_wq, + &xchg->timeout_work, + (unsigned long) + msecs_to_jiffies((unsigned int)time_ms))) { + /* hold for timer */ + atomic_inc(&xchg->ref_cnt); + } + spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); +} + +static void unf_sfs_xchg_timeout(struct work_struct *v_work) +{ + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x949, UNF_TRUE, v_work, return); + xchg = container_of(v_work, struct unf_xchg_s, timeout_work.work); + UNF_CHECK_VALID(0x950, UNF_TRUE, xchg, return); + + ret = unf_xchg_ref_inc(xchg, SFS_TIMEOUT); + UNF_REFERNCE_VAR(ret); + UNF_CHECK_VALID(0x951, UNF_TRUE, ret == RETURN_OK, return); + + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + lport = xchg->lport; + rport = xchg->rport; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + unf_xchg_ref_dec(xchg, SFS_TIMEOUT); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]SFS Exch(%p) Cmnd(0x%x) IO Exch(0x%p) Sid_Did(0x%x:0x%x) HotTag(0x%x) State(0x%x) Timeout.", + xchg, xchg->cmnd_code, xchg->io_xchg, xchg->sid, + xchg->did, xchg->hot_pool_tag, xchg->io_state); + + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + if ((xchg->io_state & TGT_IO_STATE_ABORT) && + (xchg->cmnd_code != ELS_RRQ) && + (xchg->cmnd_code != ELS_LOGO)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "SFS Exch(0x%p) Cmnd(0x%x) Hot Pool Tag(0x%x) timeout, but aborted, no need to handle.", + xchg, xchg->cmnd_code, xchg->hot_pool_tag); + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + unf_xchg_ref_dec(xchg, SFS_TIMEOUT); + unf_xchg_ref_dec(xchg, SFS_TIMEOUT); + + return; + } + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + /* The sfs times out. If the sfs is ELS reply, + * go to unf_rport_error_recovery/unf_lport_error_recovery. + * Otherwise, go to the corresponding obCallback. 
+ */ + if (UNF_XCHG_IS_ELS_REPLY(xchg) && (rport)) { + if (rport->nport_id >= UNF_FC_FID_DOM_MGR) + unf_lport_error_recovery(lport); + else + unf_rport_error_recovery(rport); + } else if (xchg->pfn_ob_callback) { + xchg->pfn_ob_callback(xchg); + } else { + /* Do nothing */ + } + + unf_xchg_ref_dec(xchg, SFS_TIMEOUT); + unf_xchg_ref_dec(xchg, SFS_TIMEOUT); +} + +static struct unf_xchg_s *unf_alloc_sfs_xchg(struct unf_lport_s *v_lport, + struct unf_xchg_mgr_s *v_xchg_mgr, + unsigned int v_xchg_type, + unsigned short v_rx_id) +{ + struct unf_xchg_s *xchg = NULL; + struct list_head *list_node = NULL; + struct unf_xchg_free_pool_s *free_pool = NULL; + struct unf_xchg_hot_pool_s *hot_pool = NULL; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x952, UNF_TRUE, v_lport, return NULL); + UNF_CHECK_VALID(0x953, UNF_TRUE, v_xchg_mgr, return NULL); + free_pool = &v_xchg_mgr->free_pool; + hot_pool = v_xchg_mgr->hot_pool; + UNF_CHECK_VALID(0x954, UNF_TRUE, free_pool, return NULL); + UNF_CHECK_VALID(0x955, UNF_TRUE, hot_pool, return NULL); + + /* Select an idle node from free pool */ + spin_lock_irqsave(&free_pool->xchg_free_pool_lock, flags); + if (list_empty(&free_pool->list_sfs_xchg_list)) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "Port(0x%x) have no Exchange anymore.", + v_lport->port_id); + + spin_unlock_irqrestore(&free_pool->xchg_free_pool_lock, flags); + + return NULL; + } + + list_node = (&free_pool->list_sfs_xchg_list)->next; + list_del(list_node); + free_pool->total_sfs_xchg--; + spin_unlock_irqrestore(&free_pool->xchg_free_pool_lock, flags); + + xchg = list_entry(list_node, struct unf_xchg_s, list_xchg_entry); + + /* + * The xchg is mounted to the Hot Pool. + * The mount mode and release mode of the xchg must be specified + * and stored in the sfs linked list. 
+ */ + flags = 0; + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags); + if (unf_alloc_hot_pool_slab(hot_pool, xchg, v_rx_id) != RETURN_OK) { + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags); + + unf_add_back_to_sfs_list(free_pool, xchg); + if (unlikely(v_lport->b_port_removing == UNF_TRUE)) + unf_check_xchg_mgr_status(v_xchg_mgr); + + return NULL; + } + + list_add_tail(&xchg->list_xchg_entry, &hot_pool->sfs_busylist); + hot_pool->total_xchges++; + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags); + + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + xchg->free_pool = free_pool; + xchg->hot_pool = hot_pool; + xchg->lport = v_lport; + xchg->xchg_mgr = v_xchg_mgr; + xchg->pfn_free_xchg = unf_free_sfs_xchg; + xchg->xchg_type = v_xchg_type; + xchg->io_state = UNF_IO_STATE_NEW; + xchg->scsi_cmnd_info.result = 0; + xchg->ob_callback_sts = UNF_IO_SUCCESS; + + xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] = + (unsigned int)atomic64_inc_return(&v_lport->exchg_index); + if (xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] == 0) + xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] = + (unsigned int) + atomic64_inc_return(&v_lport->exchg_index); + + if (delayed_work_pending(&xchg->timeout_work)) + UNF_DEL_XCHG_TIMER_SAFE(xchg); + + INIT_DELAYED_WORK(&xchg->timeout_work, unf_sfs_xchg_timeout); + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + return xchg; +} + +static void *unf_get_new_xchg(void *v_lport, unsigned int v_xchg_type, + unsigned short v_rx_id) +{ + struct unf_lport_s *lport = NULL; + struct unf_xchg_mgr_s *xchg_mgr = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int xchg_type = 0; + unsigned short xchg_mgr_type; + unsigned int rtry_cnt = 0; + unsigned int last_exchg_mgr_idx; + + xchg_mgr_type = (v_xchg_type >> 16); + xchg_type = v_xchg_type & 0xFFFF; + UNF_CHECK_VALID(0x956, UNF_TRUE, v_lport, return NULL); + + /* In the case of NPIV, the v_lport is the Vport pointer, + * and the share uses the ExchMgr of the RootLport. 
 + */ + lport = ((struct unf_lport_s *)v_lport)->root_lport; + UNF_CHECK_VALID(0x957, UNF_TRUE, (lport), return NULL); + + if (unlikely((atomic_read(&lport->port_no_operater_flag) == + UNF_LPORT_NOP) || + (atomic_read(&((struct unf_lport_s *)v_lport)->port_no_operater_flag) == + UNF_LPORT_NOP))) + return NULL; + + last_exchg_mgr_idx = + (unsigned int)atomic64_inc_return(&lport->last_exchg_mgr_idx); +try_next_mgr: + rtry_cnt++; + if (unlikely(rtry_cnt > UNF_EXCHG_MGR_NUM)) + return NULL; + + /* If Fixed mode, only use XchgMgr 0 */ + if (unlikely(xchg_mgr_type == UNF_XCHG_MGR_TYPE_FIXED)) + xchg_mgr = (struct unf_xchg_mgr_s *)lport->p_xchg_mgr[0]; + else + xchg_mgr = + (struct unf_xchg_mgr_s *) + lport->p_xchg_mgr[last_exchg_mgr_idx % UNF_EXCHG_MGR_NUM]; + + if (unlikely(!xchg_mgr)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) exchange mgr %u is null.", + lport->port_id, + last_exchg_mgr_idx % UNF_EXCHG_MGR_NUM); + return NULL; + } + + last_exchg_mgr_idx++; + + /* Allocate entries based on the Exchange type */ + switch (xchg_type) { + case UNF_XCHG_TYPE_SFS: + xchg = unf_alloc_sfs_xchg(v_lport, xchg_mgr, xchg_type, + INVALID_VALUE16); + break; + + case UNF_XCHG_TYPE_INI: + xchg = unf_alloc_io_xchg(v_lport, xchg_mgr, xchg_type, + INVALID_VALUE16); + break; + + default: + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "Port(0x%x) unknown Exchange type(0x%x).", + lport->port_id, xchg_type); + break; + } + + if (likely(xchg)) { + xchg->ox_id = INVALID_VALUE16; + xchg->abort_oxid = INVALID_VALUE16; + xchg->rx_id = INVALID_VALUE16; + xchg->debug_hook = UNF_FALSE; + xchg->alloc_jif = jiffies; + + atomic_set(&xchg->ref_cnt, 1); + atomic_set(&xchg->esgl_cnt, 0); + } else { + goto try_next_mgr; + } + + return xchg; +} + +static void unf_free_xchg(void *v_lport, void *v_xchg) +{ + struct unf_xchg_s *xchg = NULL; + + UNF_REFERNCE_VAR(v_lport); + UNF_CHECK_VALID(0x958, UNF_TRUE, (v_xchg), return); + + xchg = (struct unf_xchg_s *)v_xchg; + unf_xchg_ref_dec(xchg, XCHG_FREE_XCHG); +} + +void unf_release_xchg_mgr_temp(struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x960, UNF_TRUE, v_lport, return); + + if (v_lport->dirty_flag & UNF_LPORT_DIRTY_FLAG_XCHGMGR_DIRTY) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Port(0x%x) has dirty exchanges, don't release exchange manager template.", + v_lport->port_id); + + return; + } + + memset(&v_lport->xchg_mgr_temp, 0, + sizeof(struct unf_cm_xchg_mgr_template_s)); + + v_lport->destroy_step = UNF_LPORT_DESTROY_STEP_7_DESTROY_XCHG_MGR_TMP; +} + +static void unf_xchg_abort_all_sfs_xchg(struct unf_lport_s *v_lport, + int v_clean) +{ + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct list_head *xchg_node = NULL; + struct list_head *next_xchg_node = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned long pool_lock_flags = 0; + unsigned long xchg_lock_flags = 0; + unsigned int i = 0; + + UNF_CHECK_VALID(0x961, UNF_TRUE, v_lport, return); + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + hot_pool = unf_get_hot_pool_by_lport(v_lport, i); + if (unlikely(!hot_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, + UNF_MAJOR, + "Port(0x%x) Hot Pool is NULL.", + v_lport->port_id); + + continue; + } + + if (v_clean == UNF_FALSE) { + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + + /* Clearing the SFS_Busy_list Exchange Resource */ + list_for_each_safe(xchg_node, next_xchg_node, + &hot_pool->sfs_busylist) { + xchg = list_entry(xchg_node, struct unf_xchg_s, + list_xchg_entry); + 
spin_lock_irqsave(&xchg->xchg_state_lock, + xchg_lock_flags); + if (atomic_read(&xchg->ref_cnt) > 0) + xchg->io_state |= TGT_IO_STATE_ABORT; + spin_unlock_irqrestore(&xchg->xchg_state_lock, + xchg_lock_flags); + } + + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + } else { + continue; + } + } +} + +static void unf_xchg_abort_ini_io_xchg(struct unf_lport_s *v_lport, + int v_clean) +{ + /* Clean L_Port/V_Port Link Down I/O: Abort */ + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct list_head *xchg_node = NULL; + struct list_head *next_xchg_node = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned long pool_lock_flags = 0; + unsigned long xchg_lock_flags = 0; + unsigned int io_state = 0; + unsigned int i = 0; + + UNF_CHECK_VALID(0x962, UNF_TRUE, (v_lport), return); + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + hot_pool = unf_get_hot_pool_by_lport(v_lport, i); + if (unlikely(!hot_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, + UNF_WARN, + "[warn]Port(0x%x) hot pool is NULL", + v_lport->port_id); + + continue; + } + + if (v_clean == UNF_FALSE) { + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + + /* 1. Abort INI_Busy_List IO */ + list_for_each_safe(xchg_node, next_xchg_node, + &hot_pool->ini_busylist) { + xchg = list_entry(xchg_node, struct unf_xchg_s, + list_xchg_entry); + spin_lock_irqsave(&xchg->xchg_state_lock, + xchg_lock_flags); + if (atomic_read(&xchg->ref_cnt) > 0) + xchg->io_state |= + INI_IO_STATE_DRABORT | io_state; + spin_unlock_irqrestore(&xchg->xchg_state_lock, + xchg_lock_flags); + } + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + } else { + /* Do nothing, just return */ + continue; + } + } +} + +static void unf_xchg_abort_all_xchg(void *v_lport, + unsigned int v_xchg_type, + int v_clean) +{ + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x964, UNF_TRUE, v_lport, return); + lport = (struct unf_lport_s *)v_lport; + + switch (v_xchg_type) { + case UNF_XCHG_TYPE_SFS: + unf_xchg_abort_all_sfs_xchg(lport, v_clean); + break; + + /* Clean L_Port/V_Port Link Down I/O: Abort */ + case UNF_XCHG_TYPE_INI: + unf_xchg_abort_ini_io_xchg(lport, v_clean); + break; + + default: + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) unknown exch type(0x%x)", + lport->port_id, v_xchg_type); + break; + } +} + +static void unf_xchg_abort_ini_send_tm_cmd(void *v_lport, + void *v_rport, + unsigned long long v_lun_id) +{ + /* + * LUN Reset: set UP_ABORT tag, with: + * INI_Busy_list, IO_Wait_list, + * IO_Delay_list, IO_Delay_transfer_list + */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned long flags = 0; + unsigned long xchg_flag = 0; + unsigned int i = 0; + unsigned long long raw_lunid = 0; + + UNF_CHECK_VALID(0x981, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x981, UNF_TRUE, v_rport, return); + + lport = ((struct unf_lport_s *)v_lport)->root_lport; + UNF_CHECK_VALID(0x982, UNF_TRUE, (lport), return); + rport = (struct unf_rport_s *)v_rport; + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + hot_pool = unf_get_hot_pool_by_lport(lport, i); + if (unlikely(!hot_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) hot pool is NULL", + lport->port_id); + continue; + } + + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags); + + /* 1. 
for each exchange from busy list */ + list_for_each_safe(node, next_node, + &hot_pool->ini_busylist) { + xchg = list_entry(node, struct unf_xchg_s, + list_xchg_entry); + + raw_lunid = *(unsigned long long *) + (xchg->fcp_cmnd.lun) >> 16 & + 0x000000000000ffff; + if ((v_lun_id == raw_lunid) && + (rport == xchg->rport)) { + spin_lock_irqsave(&xchg->xchg_state_lock, + xchg_flag); + xchg->io_state |= INI_IO_STATE_TMF_ABORT; + spin_unlock_irqrestore(&xchg->xchg_state_lock, + xchg_flag); + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, + UNF_MAJOR, + "[info]Exchange(%p) state(0x%x) S_ID(0x%x) D_ID(0x%x) tag(0x%x) abort by TMF CMD", + xchg, xchg->io_state, lport->nport_id, + rport->nport_id, xchg->hot_pool_tag); + } + } + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags); + } +} + +static void unf_xchg_abort_by_lun(void *v_lport, + void *v_rport, + unsigned long long v_lun_id, + void *v_tm_xchg, + int v_abort_all_lun_flag) +{ + /* ABORT: set UP_ABORT tag for target LUN I/O */ + struct unf_xchg_s *tm_xchg = (struct unf_xchg_s *)v_tm_xchg; + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[event]Port(0x%x) LUN_ID(0x%llx) TM_EXCH(0x%p) flag(%d)", + ((struct unf_lport_s *)v_lport)->port_id, + v_lun_id, v_tm_xchg, v_abort_all_lun_flag); + + /* for INI Mode */ + if (!tm_xchg) { + /* + * LUN Reset: set UP_ABORT tag, with: + * INI_Busy_list, IO_Wait_list, + * IO_Delay_list, IO_Delay_transfer_list + */ + unf_xchg_abort_ini_send_tm_cmd(v_lport, v_rport, v_lun_id); + + return; + } +} + +static void unf_xchg_abort_ini_tmf_target_reset(void *v_lport, void *v_rport) +{ + /* + * LUN Reset: set UP_ABORT tag, with: + * INI_Busy_list, IO_Wait_list, + * IO_Delay_list, IO_Delay_transfer_list + */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned long flags = 0; + unsigned long xchg_flag = 0; + unsigned int i = 0; + + UNF_CHECK_VALID(0x981, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x981, UNF_TRUE, v_rport, return); + + lport = ((struct unf_lport_s *)v_lport)->root_lport; + UNF_CHECK_VALID(0x982, UNF_TRUE, (lport), return); + rport = (struct unf_rport_s *)v_rport; + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + hot_pool = unf_get_hot_pool_by_lport(lport, i); + if (unlikely(!hot_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) hot pool is NULL", + lport->port_id); + continue; + } + + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags); + + /* 1. 
for each exchange from busy_list */ + list_for_each_safe(node, next_node, + &hot_pool->ini_busylist) { + xchg = list_entry(node, struct unf_xchg_s, + list_xchg_entry); + if (rport == xchg->rport) { + spin_lock_irqsave(&xchg->xchg_state_lock, + xchg_flag); + xchg->io_state |= INI_IO_STATE_TMF_ABORT; + spin_unlock_irqrestore(&xchg->xchg_state_lock, + xchg_flag); + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, + UNF_MAJOR, + "[info]Exchange(%p) state(0x%x) S_ID(0x%x) D_ID(0x%x) tag(0x%x) abort by TMF CMD", + xchg, xchg->io_state, + lport->nport_id, + rport->nport_id, xchg->hot_pool_tag); + } + } + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags); + } +} + +static void unf_xchg_abort_by_session(void *v_lport, void *v_rport) +{ + /* + * LUN Reset: set UP_ABORT tag, with: + * INI_Busy_list, IO_Wait_list, + * IO_Delay_list, IO_Delay_transfer_list + */ + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[event]Port(0x%x) Rport(0x%x) start session reset with TMF", + ((struct unf_lport_s *)v_lport)->port_id, + ((struct unf_rport_s *)v_rport)->nport_id); + + unf_xchg_abort_ini_tmf_target_reset(v_lport, v_rport); +} + +static void unf_ini_busy_io_xchg_abort(void *v_hot_pool, void *v_rport, + unsigned int v_sid, unsigned int v_did, + unsigned int v_extra_io_state) +{ + /* + * for target session: Set (DRV) ABORT + * 1. R_Port remove + * 2. Send PLOGI_ACC callback + * 3. RCVD PLOGI + * 4. RCVD LOGO + */ + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct unf_xchg_s *xchg = NULL; + struct list_head *xchg_node = NULL; + struct list_head *next_xchg_node = NULL; + struct unf_rport_s *rport = NULL; + unsigned long xchg_lock_flags = 0; + + rport = (struct unf_rport_s *)v_rport; + hot_pool = (struct unf_xchg_hot_pool_s *)v_hot_pool; + + /* ABORT INI IO: INI_BUSY_LIST */ + list_for_each_safe(xchg_node, next_xchg_node, + &hot_pool->ini_busylist) { + xchg = list_entry(xchg_node, struct unf_xchg_s, + list_xchg_entry); + + spin_lock_irqsave(&xchg->xchg_state_lock, xchg_lock_flags); + if ((v_did == xchg->did) && (v_sid == xchg->sid) && + (rport == xchg->rport) && + (atomic_read(&xchg->ref_cnt) > 0)) { + xchg->scsi_cmnd_info.result = + UNF_SCSI_HOST(DID_IMM_RETRY); + xchg->io_state |= INI_IO_STATE_DRABORT; + xchg->io_state |= v_extra_io_state; + + UNF_TRACE(UNF_EVTLOG_IO_INFO, + UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]Abort INI:0x%p, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, %llu.", + xchg, + (unsigned int)xchg->hot_pool_tag, + (unsigned int)xchg->xchg_type, + (unsigned int)xchg->ox_id, + (unsigned int)xchg->rx_id, + (unsigned int)xchg->sid, + (unsigned int)xchg->did, + (unsigned int)xchg->io_state, + atomic_read(&xchg->ref_cnt), + xchg->alloc_jif); + } + spin_unlock_irqrestore(&xchg->xchg_state_lock, + xchg_lock_flags); + } +} + +static void unf_xchg_mgr_io_xchg_abort(void *v_lport, void *v_rport, + unsigned int v_sid, unsigned int v_did, + unsigned int v_extra_io_state) +{ + /* + * for target session: set ABORT + * 1. R_Port remove + * 2. Send PLOGI_ACC callback + * 3. RCVD PLOGI + * 4. 
RCVD LOGO + */ + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct unf_lport_s *lport = NULL; + unsigned long pool_lock_flags = 0; + unsigned int i = 0; + + UNF_CHECK_VALID(0x983, UNF_TRUE, v_lport, return); + lport = ((struct unf_lport_s *)v_lport)->root_lport; + UNF_CHECK_VALID(0x984, UNF_TRUE, lport, return); + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + hot_pool = unf_get_hot_pool_by_lport(lport, i); + if (unlikely(!hot_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, + UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) hot pool is NULL", + lport->port_id); + + continue; + } + + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + + /* 1. Clear INI (session) IO: INI Mode */ + unf_ini_busy_io_xchg_abort(hot_pool, v_rport, v_sid, + v_did, v_extra_io_state); + + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + } +} + +static void unf_xchg_mgr_sfs_xchg_abort(void *v_lport, void *v_rport, + unsigned int v_sid, unsigned int v_did) +{ + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct list_head *xchg_node = NULL; + struct list_head *next_xchg_node = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + unsigned long pool_lock_flags = 0; + unsigned long xchg_lock_flags = 0; + unsigned int i = 0; + + UNF_CHECK_VALID(0x991, UNF_TRUE, (v_lport), return); + + lport = ((struct unf_lport_s *)v_lport)->root_lport; + UNF_CHECK_VALID(0x992, UNF_TRUE, (lport), return); + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + hot_pool = unf_get_hot_pool_by_lport(lport, i); + if (!hot_pool) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, + UNF_LOG_IO_ATT, UNF_MAJOR, + "Port(0x%x) Hot Pool is NULL.", + lport->port_id); + + continue; + } + + rport = (struct unf_rport_s *)v_rport; + + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + + /* Clear the SFS exchange of the corresponding connection */ + list_for_each_safe(xchg_node, next_xchg_node, + &hot_pool->sfs_busylist) { + xchg = list_entry(xchg_node, struct unf_xchg_s, + list_xchg_entry); + + spin_lock_irqsave(&xchg->xchg_state_lock, + xchg_lock_flags); + if ((v_did == xchg->did) && (v_sid == xchg->sid) && + (rport == xchg->rport) && + (atomic_read(&xchg->ref_cnt) > 0)) { + xchg->io_state |= TGT_IO_STATE_ABORT; + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, + UNF_MAJOR, + "Abort SFS:0x%p---0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----%llu.", + xchg, + (unsigned int)xchg->hot_pool_tag, + (unsigned int)xchg->xchg_type, + (unsigned int)xchg->ox_id, + (unsigned int)xchg->rx_id, + (unsigned int)xchg->sid, + (unsigned int)xchg->did, + (unsigned int)xchg->io_state, + atomic_read(&xchg->ref_cnt), + xchg->alloc_jif); + } + spin_unlock_irqrestore(&xchg->xchg_state_lock, + xchg_lock_flags); + } + + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + } +} + +unsigned int unf_init_xchg_mgr_temp(struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x959, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + v_lport->xchg_mgr_temp.pfn_unf_xchg_get_free_and_init = + unf_get_new_xchg; + v_lport->xchg_mgr_temp.pfn_unf_xchg_release = unf_free_xchg; + v_lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag = + unf_lookup_xchg_by_tag; + v_lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_id = + unf_find_xchg_by_oxid; + v_lport->xchg_mgr_temp.pfn_unf_xchg_add_timer = + unf_fc_xchg_add_timer; + v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer = + unf_xchg_cancel_timer; + v_lport->xchg_mgr_temp.pfn_unf_xchg_abort_all_io = + unf_xchg_abort_all_xchg; + 
v_lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_cmnd_sn = + unf_lookup_xchg_by_cmnd_sn; + v_lport->xchg_mgr_temp.pfn_unf_xchg_abort_by_lun = + unf_xchg_abort_by_lun; + v_lport->xchg_mgr_temp.pfn_unf_xchg_abort_by_session = + unf_xchg_abort_by_session; + v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_io_xchg_abort = + unf_xchg_mgr_io_xchg_abort; + v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_sfs_xchg_abort = + unf_xchg_mgr_sfs_xchg_abort; + + return RETURN_OK; +} + +void unf_set_hot_pool_wait_state(struct unf_lport_s *v_lport, + enum int_e v_wait_state) +{ + struct unf_xchg_hot_pool_s *hot_pool = NULL; + unsigned long pool_lock_flags = 0; + unsigned int i = 0; + + UNF_CHECK_VALID(0x965, UNF_TRUE, v_lport, return); + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + hot_pool = unf_get_hot_pool_by_lport(v_lport, i); + if (unlikely(!hot_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, + UNF_WARN, + "[warn]Port(0x%x) hot pool is NULL", + v_lport->port_id); + continue; + } + + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + hot_pool->wait_state = v_wait_state; + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + } +} + +unsigned int unf_xchg_ref_inc(struct unf_xchg_s *v_xchg, + enum unf_ioflow_id_e v_io_stage) +{ + struct unf_xchg_hot_pool_s *hot_pool = NULL; + unsigned long flags = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x967, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + if (unlikely(v_xchg->debug_hook == UNF_TRUE)) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]Xchg(0x%p) State(0x%x) SID_DID(0x%x_0x%x) OX_ID_RX_ID(0x%x_0x%x) AllocJiff(%llu) Refcnt(%d) Stage(%s)", + v_xchg, v_xchg->io_state, v_xchg->sid, + v_xchg->did, v_xchg->ox_id, v_xchg->rx_id, + v_xchg->alloc_jif, atomic_read(&v_xchg->ref_cnt), + io_stage[v_io_stage].stage); + } + + hot_pool = v_xchg->hot_pool; + UNF_CHECK_VALID(0x968, UNF_TRUE, hot_pool, return UNF_RETURN_ERROR); + UNF_REFERNCE_VAR(v_io_stage); + + /* Exchange -> Hot Pool Tag check */ + if (unlikely((v_xchg->hot_pool_tag >= + (hot_pool->slab_total_sum + hot_pool->base)) || + (v_xchg->hot_pool_tag < hot_pool->base))) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Xchg(0x%p) S_ID(%xh) D_ID(0x%x) hot_pool_tag(0x%x) is bigger than slab total num(0x%x) base(0x%x)", + v_xchg, v_xchg->sid, v_xchg->did, + v_xchg->hot_pool_tag, + hot_pool->slab_total_sum + hot_pool->base, + hot_pool->base); + + return UNF_RETURN_ERROR; + } + + /* atomic read & inc */ + spin_lock_irqsave(&v_xchg->xchg_state_lock, flags); + if (unlikely(atomic_read(&v_xchg->ref_cnt) <= 0)) { + ret = UNF_RETURN_ERROR; + } else { + if (unf_get_xchg_by_xchg_tag(hot_pool, + v_xchg->hot_pool_tag - + hot_pool->base) == + v_xchg) { + atomic_inc(&v_xchg->ref_cnt); + ret = RETURN_OK; + } else { + ret = UNF_RETURN_ERROR; + } + } + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags); + + return ret; +} + +void unf_xchg_ref_dec(struct unf_xchg_s *v_xchg, + enum unf_ioflow_id_e v_io_stage) +{ + /* Atomic dec ref_cnt & test, free exchange + * if necessary (ref_cnt==0) + */ + struct unf_xchg_hot_pool_s *hot_pool = NULL; + void (*pfn_free_xchg)(struct unf_xchg_s *) = NULL; + unsigned long flags = 0; + unsigned long xchg_lock_flags = 0; + + UNF_CHECK_VALID(0x969, UNF_TRUE, (v_xchg), return); + + if (v_xchg->debug_hook == UNF_TRUE) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]Xchg(0x%p) State(0x%x) SID_DID(0x%x_0x%x) OXID_RXID(0x%x_0x%x) AllocJiff(%llu) Refcnt(%d) Statge %s", + v_xchg, 
v_xchg->io_state, v_xchg->sid, + v_xchg->did, v_xchg->ox_id, v_xchg->rx_id, + v_xchg->alloc_jif, atomic_read(&v_xchg->ref_cnt), + io_stage[v_io_stage].stage); + } + + hot_pool = v_xchg->hot_pool; + UNF_CHECK_VALID(0x970, UNF_TRUE, hot_pool, return); + UNF_CHECK_VALID(0x970, UNF_TRUE, + v_xchg->hot_pool_tag >= hot_pool->base, return); + UNF_REFERNCE_VAR(v_io_stage); + + /* + * 1. Atomic dec & test + * 2. Free exchange if necessary (ref_cnt == 0) + */ + spin_lock_irqsave(&v_xchg->xchg_state_lock, xchg_lock_flags); + if (atomic_dec_and_test(&v_xchg->ref_cnt)) { + pfn_free_xchg = v_xchg->pfn_free_xchg; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, + xchg_lock_flags); + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags); + unf_hot_pool_slab_set(hot_pool, + v_xchg->hot_pool_tag - hot_pool->base, + NULL); + /* Delete exchange list entry */ + list_del_init(&v_xchg->list_xchg_entry); + hot_pool->total_xchges--; + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags); + + // unf_free_fcp_xchg --->>> unf_done_ini_xchg + if (pfn_free_xchg) + pfn_free_xchg(v_xchg); + } else { + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, + xchg_lock_flags); + } +} + +bool unf_busy_io_completed(struct unf_lport_s *v_lport) +{ + struct unf_xchg_mgr_s *xchg_mgr = NULL; + unsigned long pool_lock_flags = 0; + unsigned int i; + + UNF_CHECK_VALID(0x5841, UNF_TRUE, v_lport, return UNF_TRUE); + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + xchg_mgr = unf_get_xchg_mgr_by_lport(v_lport, i); + if (unlikely(!xchg_mgr)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) Exchange Manager is NULL", + v_lport->port_id); + continue; + } + + spin_lock_irqsave(&xchg_mgr->hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + if (!list_empty(&xchg_mgr->hot_pool->ini_busylist)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, + UNF_INFO, "[info]Port(0x%x) ini busylist is not empty.", + v_lport->port_id); + + spin_unlock_irqrestore( + &xchg_mgr->hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + return UNF_FALSE; + } + spin_unlock_irqrestore( + &xchg_mgr->hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + } + return UNF_TRUE; +} diff --git a/drivers/scsi/huawei/hifc/unf_exchg.h b/drivers/scsi/huawei/hifc/unf_exchg.h new file mode 100644 index 000000000000..fa24cd986654 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_exchg.h @@ -0,0 +1,513 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __UNF_FCEXCH_H__ +#define __UNF_FCEXCH_H__ + +#include "unf_scsi_common.h" +#include "unf_lport.h" + +#define DRV_VERIFY_CRC_MASK (1 << 1) +#define DRV_VERIFY_APP_MASK (1 << 2) +#define DRV_VERIFY_LBA_MASK (1 << 3) + +#define DRV_DIF_CRC_POS 0 +#define DRV_DIF_CRC_LEN 2 +#define DRV_DIF_APP_POS 2 +#define DRV_DIF_APP_LEN 2 +#define DRV_DIF_LBA_POS 4 +#define DRV_DIF_LBA_LEN 4 + +enum unf_ioflow_id_e { + XCHG_ALLOC = 0, + TGT_RECEIVE_ABTS, + TGT_ABTS_DONE, + TGT_IO_SRR, + SFS_RESPONSE, + SFS_TIMEOUT, + INI_SEND_CMND, + INI_RESPONSE_DONE, + INI_EH_ABORT, + INI_EH_DEVICE_RESET, + INI_EH_BLS_DONE, + INI_IO_TIMEOUT, + INI_REQ_TIMEOUT, + XCHG_CANCEL_TIMER, + XCHG_FREE_XCHG, + SEND_ELS, + IO_XCHG_WAIT, + XCHG_BUTT +}; + +enum unf_xchg_type_e { + UNF_XCHG_TYPE_INI = 0, /* INI IO */ + UNF_XCHG_TYPE_SFS = 1, /* SFS IO */ + UNF_XCHG_TYPE_INVALID +}; + +enum unf_xchg_mgr_type_e { + UNF_XCHG_MGR_TYPE_RANDOM = 0, + UNF_XCHG_MGR_TYPE_FIXED = 1, + UNF_XCHG_MGR_TYPE_INVALID +}; + +enum tgt_io_xchg_send_stage_e { + 
TGT_IO_SEND_STAGE_NONE = 0, + TGT_IO_SEND_STAGE_DOING = 1, /* xfer/rsp into queue */ + TGT_IO_SEND_STAGE_DONE = 2, /* xfer/rsp into queue complete */ + TGT_IO_SEND_STAGE_ECHO = 3, /* driver handled TSTS */ + TGT_IO_SEND_STAGE_INVALID +}; + +enum tgt_io_send_result_e { + TGT_IO_SEND_RESULT_OK = 0, /* xfer/rsp enqueue succeed */ + TGT_IO_SEND_RESULT_FAIL = 1, /* xfer/rsp enqueue fail */ + TGT_IO_SEND_RESULT_INVALID +}; + +struct unf_ioflow_id_s { + char *stage; +}; + +#define UNF_CHECK_OXID_MATCHED(v_oxid, v_oid, xchg) \ + ((v_oxid == xchg->ox_id) && (v_oid == xchg->oid) && \ + (atomic_read(&xchg->ref_cnt) > 0)) + +#define UNF_CHECK_ALLOCTIME_VALID(lport, xchg_tag, exchg, pkg_alloc_time, \ + xchg_alloc_time) \ + do { \ + if (unlikely((pkg_alloc_time != 0) && \ + (pkg_alloc_time != xchg_alloc_time))) { \ + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, \ + UNF_ERR, \ + "Lport(0x%x_0x%x_0x%x_0x%p) AllocTime is not equal,PKG AllocTime:0x%x,Exhg AllocTime:0x%x", \ + lport->port_id, lport->nport_id, \ + xchg_tag, exchg, \ + pkg_alloc_time, xchg_alloc_time); \ + return UNF_RETURN_ERROR; \ + }; \ + if (unlikely(pkg_alloc_time == 0)) { \ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, \ + UNF_MAJOR, \ + "Lport(0x%x_0x%x_0x%x_0x%p) pkgtime err,PKG AllocTime:0x%x,Exhg AllocTime:0x%x", \ + lport->port_id, lport->nport_id, \ + xchg_tag, exchg, \ + pkg_alloc_time, xchg_alloc_time); \ + }; \ + } while (0) + +#define UNF_GET_DIF_ERROR_LEVEL1(v_xchg, dif_control, check_err_code, \ + tgt_err_code, default_err_code) \ + do { \ + if (DRV_VERIFY_CRC_MASK & \ + v_xchg->dif_control.protect_opcode) { \ + if (memcmp(&dif_control->actual_dif[DRV_DIF_CRC_POS], \ + &dif_control->expected_dif[DRV_DIF_CRC_POS], \ + DRV_DIF_CRC_LEN) != 0) { \ + tgt_err_code = default_err_code; \ + } \ + } \ + } while (0) + +#define UNF_GET_DIF_ERROR_LEVEL2(v_xchg, dif_control, check_err_code, \ + tgt_err_code, default_err_code) \ + do { \ + if ((check_err_code == tgt_err_code) && \ + (DRV_VERIFY_LBA_MASK & v_xchg->dif_control.protect_opcode)) { \ + if (memcmp(&dif_control->actual_dif[DRV_DIF_LBA_POS], \ + &dif_control->expected_dif[DRV_DIF_LBA_POS], \ + DRV_DIF_LBA_LEN) != 0) { \ + tgt_err_code = default_err_code; \ + } \ + } \ + } while (0) + +#define UNF_GET_DIF_ERROR_LEVEL3(v_xchg, dif_control, check_err_code, \ + tgt_err_code, default_err_code) \ + UNF_GET_DIF_ERROR_LEVEL2(v_xchg, dif_control, check_err_code, \ + tgt_err_code, default_err_code) + +#define UNF_SET_SCSI_CMND_RESULT(v_xchg, v_result) \ + ((v_xchg)->scsi_cmnd_info.result = (v_result)) + +#define UNF_GET_GS_SFS_XCHG_TIMER(v_lport) (3 * \ + (unsigned long)(v_lport)->ra_tov) + +#define UNF_GET_BLS_SFS_XCHG_TIMER(v_lport) (2 * \ + (unsigned long)(v_lport)->ra_tov) + +#define UNF_GET_ELS_SFS_XCHG_TIMER(v_lport) (2 * \ + (unsigned long)(v_lport)->ra_tov) + +#define UNF_XCHG_MGR_FC 0 +#define UNF_XCHG_MIN_XID 0x0000 +#define UNF_XCHG_MAX_XID 0xffff +#define UNF_ELS_ECHO_RESULT_OK 0 +#define UNF_ELS_ECHO_RESULT_FAIL 1 + +struct unf_xchg_s; +/* Xchg hot pool, busy IO lookup Xchg */ +struct unf_xchg_hot_pool_s { + /* Xchg sum, in hot pool */ + unsigned short total_xchges; + /* Total number of resources consumedcorresponding to buffer */ + unsigned int total_res_cnt; + enum int_e wait_state; + + /* pool lock */ + spinlock_t xchg_hot_pool_lock; + + /* Xchg posiontion list */ + struct list_head sfs_busylist; + struct list_head ini_busylist; + struct list_head list_destroy_xchg; + + /* Next free hot point */ + unsigned short slab_next_index; + unsigned short slab_total_sum; + 
unsigned short base; + + struct unf_lport_s *lport; + + struct unf_xchg_s *xchg_slab[0]; + +}; + +/* FREE POOL of Xchg*/ +struct unf_xchg_free_pool_s { + spinlock_t xchg_free_pool_lock; + + unsigned int fcp_xchg_sum; + + /* IO used Xchg */ + struct list_head list_free_xchg_list; + unsigned int total_fcp_xchg; + + /* SFS used Xchg */ + struct list_head list_sfs_xchg_list; + unsigned int total_sfs_xchg; + unsigned int sfs_xchg_sum; + + struct completion *xchg_mgr_completion; +}; + +struct unf_big_sfs_s { + struct list_head entry_big_sfs; + void *vaddr; + unsigned int size; +}; + +struct unf_big_sfs_pool_s { + void *big_sfs_pool; + unsigned int free_count; + struct list_head list_free_pool; + struct list_head list_busy_pool; + spinlock_t big_sfs_pool_lock; +}; + +/* Xchg Manager for vport Xchg */ +struct unf_xchg_mgr_s { + /* MG type */ + unsigned int mgr_type; + + /* MG entry */ + struct list_head xchg_mgr_entry; + + /* MG attribution */ + unsigned short min_xid; + unsigned short max_xid; + unsigned int mem_size; + + /* MG alloced resource */ + void *fcp_mm_start; + + unsigned int sfs_mem_size; + void *sfs_mm_start; + dma_addr_t sfs_phy_addr; + + struct unf_xchg_free_pool_s free_pool; + struct unf_xchg_hot_pool_s *hot_pool; + + struct unf_big_sfs_pool_s st_big_sfs_pool; + + struct buf_describe_s big_sfs_buf_list; + struct buf_describe_s rsp_buf_list; + +}; + +struct unf_seq_s { + /* Seq ID */ + unsigned char seq_id; + + /* Seq Cnt */ + unsigned short seq_cnt; + + /* Seq state and len,maybe used for fcoe */ + unsigned short seq_stat; + unsigned int rec_data_len; +}; + +union unf_xchg_fcp_sfs_u { + struct unf_sfs_entry_s sfs_entry; + struct unf_fcp_rsp_iu_entry_s fcp_rsp_entry; +}; + +#define UNF_IO_STATE_NEW 0 +#define TGT_IO_STATE_SEND_XFERRDY (1 << 2) /* succeed to send XFer rdy */ +#define TGT_IO_STATE_RSP (1 << 5) /* chip send rsp */ +#define TGT_IO_STATE_ABORT (1 << 7) + +/* INI Upper-layer Task Management Commands */ +#define INI_IO_STATE_UPTASK (1 << 15) +/* INI Upper-layer timeout Abort flag */ +#define INI_IO_STATE_UPABORT (1 << 16) +#define INI_IO_STATE_DRABORT (1 << 17) /* INI driver Abort flag */ +#define INI_IO_STATE_DONE (1 << 18) /* INI complete flag */ +#define INI_IO_STATE_WAIT_RRQ (1 << 19) /* INI wait send rrq */ +#define INI_IO_STATE_UPSEND_ERR (1 << 20) /* INI send fail flag */ +/* INI only clear firmware resource flag */ +#define INI_IO_STATE_ABORT_RESOURCE (1 << 21) +/* ioc abort:INI send ABTS ,5S timeout Semaphore,than set 1 */ +#define INI_IO_STATE_ABORT_TIMEOUT (1 << 22) +#define INI_IO_STATE_RRQSEND_ERR (1 << 23) /* INI send RRQ fail flag */ +/* INI busy IO session logo status */ +#define INI_IO_STATE_LOGO (1 << 24) +#define INI_IO_STATE_TMF_ABORT (1 << 25) /* INI TMF ABORT IO flag */ +#define INI_IO_STATE_REC_TIMEOUT_WAIT (1 << 26) /* INI REC TIMEOUT WAIT */ +#define INI_IO_STATE_REC_TIMEOUT (1 << 27) /* INI REC TIMEOUT */ + +#define TMF_RESPONSE_RECEIVED (1 << 0) +#define MARKER_STS_RECEIVED (1 << 1) +#define ABTS_RESPONSE_RECEIVED (1 << 2) + +struct unf_scsi_cmd_info_s { + unsigned long time_out; + unsigned long abort_timeout; + void *scsi_cmnd; + void (*pfn_done)(struct unf_scsi_cmd_s *); + ini_get_sgl_entry_buf pfn_unf_get_sgl_entry_buf; + struct unf_ini_error_code_s *err_code_table; /* error code table */ + char *sense_buf; + unsigned int err_code_table_cout; /* Size of the error code table */ + unsigned int buf_len; + unsigned int entry_cnt; + unsigned int result; /* Stores command execution results */ + unsigned int port_id; + /* Re-search for rport based on 
scsi_id during retry. Otherwise, + * data inconsistency will occur + */ + unsigned int scsi_id; + void *sgl; +}; + +struct unf_req_sgl_info_s { + void *sgl; + void *sgl_start; + unsigned int req_index; + unsigned int entry_index; +}; + +struct unf_els_echo_info_s { + unsigned long long response_time; + struct semaphore echo_sync_sema; + unsigned int echo_result; +}; + +struct unf_xchg_s { + /* Mgr resource related */ + /* list delete from HotPool */ + struct unf_xchg_hot_pool_s *hot_pool; + + /* attach to FreePool */ + struct unf_xchg_free_pool_s *free_pool; + struct unf_xchg_mgr_s *xchg_mgr; + struct unf_lport_s *lport; /* Local LPort/VLPort */ + struct unf_rport_s *rport; /* Remote Port */ + struct unf_rport_s *disc_rport; /* Discover Remote Port */ + struct list_head list_xchg_entry; + struct list_head list_abort_xchg_entry; + spinlock_t xchg_state_lock; + + /* Xchg reference */ + atomic_t ref_cnt; + atomic_t esgl_cnt; + int debug_hook; + /* Xchg attributes */ + unsigned short hot_pool_tag; /* Hot pool tag */ + /* Only used for abort; ox_id for + * lun reset/logo/plogi/linkdown is set to 0xffff + */ + unsigned short abort_oxid; + unsigned int xchg_type; /* LS, TGT CMND, REQ, or SCSI Cmnd */ + unsigned short ox_id; + unsigned short rx_id; + unsigned int sid; + unsigned int did; + unsigned int oid; /* ID of the exchange initiator */ + unsigned int disc_port_id; /* Send GNN_ID/GFF_ID NPortId */ + unsigned char seq_id; + unsigned char byte_orders; /* Byte order */ + struct unf_seq_s seq; + + unsigned int cmnd_code; + unsigned int world_id; + /* Dif control */ + struct unf_dif_control_info_s dif_control; + struct dif_info_s dif_info; + + /* IO status: Abort, timeout */ + unsigned int io_state; /* TGT_IO_STATE_E */ + unsigned int tmf_state; /* TMF STATE */ + unsigned int ucode_abts_state; + unsigned int abts_state; + + /* IO Enqueuing */ + enum tgt_io_xchg_send_stage_e io_send_stage; /* TGT_IO_SEND_STAGE_E */ + + /* IO Enqueuing result, success or failure */ + enum tgt_io_send_result_e io_send_result; /* TGT_IO_SEND_RESULT_E */ + + /* Whether ABORT is delivered to the chip for IO */ + unsigned char io_send_abort; + /* Result of delivering ABORT to the chip + * (success: UNF_TRUE; failure: UNF_FALSE) + */ + unsigned char io_abort_result; + + /* for INI, indicates the length of the data + * transmitted over the PCI link + */ + unsigned int data_len; + + /* ResidLen: greater than 0 means underflow, less than 0 means overflow */ + int resid_len; + + /* +++++++++++++++++IO Special++++++++++++++++++++ */ + /* point to tgt cmnd/req/scsi cmnd */ + /* Fcp cmnd */ + struct unf_fcp_cmnd_s fcp_cmnd; + + struct unf_scsi_cmd_info_s scsi_cmnd_info; + + struct unf_req_sgl_info_s req_sgl_info; + + struct unf_req_sgl_info_s dif_sgl_info; + + unsigned long long cmnd_sn; + + /* timestamp */ + unsigned long long start_jif; + unsigned long long alloc_jif; + + unsigned long long io_front_jif; + + /* I/O resources to be consumed, corresponding to buffer */ + unsigned int may_consume_res_cnt; + /* Number of resources consumed by I/Os.
The value is not zero + * only when it is sent to the chip + */ + unsigned int fact_consume_res_cnt; + + /* scsi req info */ + unsigned int data_direction; + + struct unf_big_sfs_s *big_sfs_buf; + + /* scsi cmnd sense_buffer pointer */ + union unf_xchg_fcp_sfs_u fcp_sfs_union; + + /* One exchange may use several External Sgls */ + struct list_head list_esgls; + + struct unf_els_echo_info_s echo_info; + + /* +++++++++++++++++Task Special++++++++++++++++++++ */ + struct semaphore task_sema; + + /* for RRQ ,IO Xchg add to SFS Xchg */ + void *io_xchg; + + /* Xchg delay work */ + struct delayed_work timeout_work; + + /* send result callback */ + void (*pfn_ob_callback)(struct unf_xchg_s *); + + /*Response IO callback */ + void (*pfn_callback)(void *v_lport, + void *v_rport, + void *v_xchg); + + /* Xchg release function */ + void (*pfn_free_xchg)(struct unf_xchg_s *); + + /* +++++++++++++++++low level Special++++++++++++++++++++ */ + unsigned int private[PKG_MAX_PRIVATE_DATA_SIZE]; + + /* ABTS_RSP info */ + struct unf_abts_rsps_s abts_rsps; + + unsigned long long rport_bind_jifs; + + /* sfs exchg ob callback status */ + unsigned int ob_callback_sts; + unsigned int scsi_id; + atomic_t delay_flag; + void *upper_ct; +}; + +struct unf_esgl_page_s *unf_get_one_free_esgl_page(struct unf_lport_s *v_lport, + struct unf_xchg_s *v_xchg); +void unf_release_xchg_mgr_temp(struct unf_lport_s *v_lport); +unsigned int unf_init_xchg_mgr_temp(struct unf_lport_s *v_lport); +unsigned int unf_alloc_xchg_resource(struct unf_lport_s *v_lport); +void unf_free_all_xchg_mgr(struct unf_lport_s *v_lport); +void unf_xchg_mgr_destroy(struct unf_lport_s *v_lport); +unsigned int unf_xchg_ref_inc(struct unf_xchg_s *v_xchg, + enum unf_ioflow_id_e v_io_stage); +void unf_xchg_ref_dec(struct unf_xchg_s *v_xchg, + enum unf_ioflow_id_e v_io_stage); +struct unf_xchg_mgr_s *unf_get_xchg_mgr_by_lport(struct unf_lport_s *v_lport, + unsigned int); +struct unf_xchg_hot_pool_s *unf_get_hot_pool_by_lport( + struct unf_lport_s *v_lport, unsigned int); +void unf_free_lport_ini_xchg(struct unf_xchg_mgr_s *v_xchg_mgr, + int v_done_ini_flag); +struct unf_xchg_s *unf_cm_lookup_xchg_by_cmnd_sn( + void *v_lport, + unsigned long long v_command_sn, + unsigned int v_world_id); +void *unf_cm_lookup_xchg_by_id(void *v_lport, unsigned short v_oxid, + unsigned int v_oid); +void unf_cm_xchg_abort_by_lun(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned long long v_lun_id, + void *v_tm_xchg, int v_abort_all_lun_flag); +void unf_cm_xchg_abort_by_session(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); + +void unf_cm_xchg_mgr_abort_io_by_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int v_sid, + unsigned int v_did, + unsigned int extra_io_stat); +void unf_cm_xchg_mgr_abort_sfs_by_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int v_sid, + unsigned int v_did); +void unf_cm_free_xchg(void *v_lport, void *v_xchg); +void *unf_cm_get_free_xchg(void *v_lport, unsigned int v_xchg_type); +void *unf_cm_lookup_xchg_by_tag(void *v_lport, unsigned short v_hot_pool_tag); +void unf_release_esgls(struct unf_xchg_s *v_xchg); +void unf_show_all_xchg(struct unf_lport_s *v_lport, + struct unf_xchg_mgr_s *v_xchg_mgr); +void unf_destroy_dirty_xchg(struct unf_lport_s *v_lport, int v_show_only); +void unf_wakeup_scsi_task_cmnd(struct unf_lport_s *v_lport); +void unf_set_hot_pool_wait_state(struct unf_lport_s *v_lport, + enum int_e v_wait_state); +void unf_free_lport_all_xchg(struct unf_lport_s 
*v_lport); +bool unf_busy_io_completed(struct unf_lport_s *v_lport); +#endif + diff --git a/drivers/scsi/huawei/hifc/unf_service.c b/drivers/scsi/huawei/hifc/unf_service.c new file mode 100644 index 000000000000..263d5b43fc99 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_service.c @@ -0,0 +1,9873 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#include "unf_exchg.h" +#include "unf_log.h" +#include "unf_rport.h" +#include "unf_exchg.h" +#include "unf_service.h" +#include "unf_portman.h" +#include "unf_npiv.h" + +static void unf_flogi_callback(void *v_lport, + void *v_rport, + void *v_xchg); +static void unf_fdisc_callback(void *v_lport, + void *v_rport, + void *v_xchg); +static void unf_plogi_callback(void *v_lport, + void *v_rport, + void *v_xchg); +static unsigned int unf_rec_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg); +static void unf_gid_ft_callback(void *v_lport, + void *v_rport, + void *v_xchg); +static void unf_gid_pt_callback(void *v_lport, + void *v_rport, + void *v_xchg); +static void unf_process_rport_after_logo(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +static unsigned int unf_flogi_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg); +static unsigned int unf_plogi_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg); +static unsigned int unf_prli_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg); +static unsigned int unf_prlo_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg); +static unsigned int unf_rscn_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg); +static unsigned int unf_logo_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg); +static unsigned int unf_echo_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg); +static unsigned int unf_pdisc_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg); +static unsigned int unf_adisc_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg); +static unsigned int unf_rrq_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg); +static unsigned int unf_rls_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg); +static unsigned int unf_send_els_rjt_by_rport( + struct unf_lport_s *v_lport, + struct unf_xchg_s *v_xchg, + struct unf_rport_s *v_rport, + struct unf_rjt_info_s *v_rjt_info); + +unsigned int max_frame_size = UNF_DEFAULT_FRAME_SIZE; + +#define FCP_XFER_RDY_IU 0x05 +#define FCP_RSP_IU 0x07 +#define FCP_DATA_IU 0x01 + +#define UNF_GID_LAST_PORT_ID 0x80 +#define UNF_LOWLEVEL_BBCREDIT 0x6 +#define UNF_DEFAULT_BB_SC_N 0 +#define UNF_INIT_DISC 0x1 /* first time DISC */ +#define UNF_RSCN_DISC 0x2 /* RSCN Port Addr DISC */ +/* Reference from FCP-4 Table33 RR_TOV: REC_TOV + 2*R_A_TOV + 1S, + * REC_TOV = E_D_TOV + 1s + */ +#define UNF_CALC_LPORT_RRTOV(v_lport) \ + (((v_lport)->ed_tov + 1000) + (2 * (v_lport)->ra_tov + 1000)) + +#define UNF_GID_CONTROL(v_nport_id) ((v_nport_id) >> 24) + +#define UNF_ECHO_PLD_DATA 0x1234567890ABCDEF +#define UNF_ECHO_REQ_SIZE 0 + +#define UNF_GET_PORT_OPTIONS(v_fc4feature) ((v_fc4feature) >> 20) + +#define UNF_GET_DOMAIN_ID(x) (((x) & 0xFF0000) >> 16) /* domain id 
*/ +#define UNF_GET_AREA_ID(x) (((x) & 0x00FF00) >> 8) /* area id */ + +#define UNF_SERVICE_GET_NPORTID_FORM_GID_PAGE(v_port_id_page) \ + (((unsigned int)(v_port_id_page)->port_id_domain << 16) | \ + ((unsigned int)(v_port_id_page)->port_id_area << 8) | \ + ((unsigned int)(v_port_id_page)->port_id_port)) + +#define UNF_GNN_GFF_ID_RJT_REASON(rjt_reason) \ + ((((rjt_reason) & UNF_CTIU_RJT_MASK) == \ + UNF_CTIU_RJT_UNABLE_PERFORM) && \ + ((((rjt_reason) & UNF_CTIU_RJT_EXP_MASK) == \ + UNF_CTIU_RJT_EXP_PORTID_NO_REG) || \ + (((rjt_reason) & UNF_CTIU_RJT_EXP_MASK) == \ + UNF_CTIU_RJT_EXP_PORTNAME_NO_REG) || \ + (((rjt_reason) & UNF_CTIU_RJT_EXP_MASK) == \ + UNF_CTIU_RJT_EXP_NODENAME_NO_REG))) + +#define UNF_NEED_BIG_RESPONSE_BUFF(cmnd_code) \ + (((cmnd_code) == ELS_ECHO) || ((cmnd_code) == NS_GID_PT) || \ + ((cmnd_code) == NS_GID_FT)) + +#define NEED_REFRESH_NPORTID(pkg) ((((pkg)->cmnd == ELS_PLOGI) || \ + ((pkg)->cmnd == ELS_PDISC) || \ + ((pkg)->cmnd == ELS_ADISC))) + +struct unf_els_handler_table { + unsigned int cmnd; + unsigned int (*pfn_els_cmnd_handler)(struct unf_lport_s *, + unsigned int, struct unf_xchg_s *); +}; + +#define UNF_SERVICE_COLLECT(service_collect, item) \ + do { \ + if ((item) < UNF_SERVICE_BUTT) { \ + service_collect.service_cnt[(item)]++; \ + } \ + } while (0) + +struct unf_els_handler_table els_handle[] = { + { ELS_PLOGI, unf_plogi_handler }, + { ELS_FLOGI, unf_flogi_handler }, + { ELS_LOGO, unf_logo_handler }, + { ELS_ECHO, unf_echo_handler }, + { ELS_RRQ, unf_rrq_handler }, + { ELS_REC, unf_rec_handler }, + { ELS_PRLI, unf_prli_handler }, + { ELS_PRLO, unf_prlo_handler }, + { ELS_PDISC, unf_pdisc_handler }, + { ELS_ADISC, unf_adisc_handler }, + { ELS_RSCN, unf_rscn_handler }, + { ELS_RLS, unf_rls_handler } +}; + +static void unf_check_rport_need_delay_prli(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int v_port_feature) +{ + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3300, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3301, UNF_TRUE, v_rport, return); + + v_port_feature &= UNF_PORT_MODE_BOTH; + + /* Used for: L_Port has INI mode & R_Port is not SW */ + if (v_rport->nport_id < UNF_FC_FID_DOM_MGR) { + /* + * 1. immediately: R_Port only with TGT, or + * L_Port only with INI & R_Port has TGT mode, + * send PRLI immediately + */ + if (((v_port_feature == UNF_PORT_MODE_TGT) || + (v_lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT)) || + ((v_port_feature & UNF_PORT_MODE_TGT) == + UNF_PORT_MODE_TGT)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Port(0x%x_0x%x) Rport(0x%x) with feature(0x%x) send PRLI", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, v_port_feature); + + /* Send PRLI to remote port */ + ret = unf_send_prli(v_lport, v_rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) Rport(0x%x) with feature(0x%x) send PRLI failed", + v_lport->port_id, + v_lport->nport_id, + v_rport->nport_id, + v_port_feature); + + /* Do R_Port recovery */ + unf_rport_error_recovery(v_rport); + } + } else if (v_port_feature != UNF_PORT_MODE_INI) { + /* 2. 
R_Port has BOTH mode or unknown, + * Delay to send PRLI + */ + /* Prevent: PRLI done before PLOGI */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]LOGIN: Port(0x%x_0x%x) Rport(0x%x) with feature(0x%x) delay to send PRLI", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, v_port_feature); + + /* Delay to send PRLI to R_Port */ + unf_rport_delay_login(v_rport); + } else { + /* 3. R_Port only with INI mode: wait for R_Port's + * PRLI: Do not care + */ + /* Cancel recovery(timer) work */ + if (delayed_work_pending(&v_rport->recovery_work)) { + if (cancel_delayed_work( + &v_rport->recovery_work)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]LOGIN: Port(0x%x_0x%x) Rport(0x%x) with feature(0x%x) is pure INI", + v_lport->port_id, + v_lport->nport_id, + v_rport->nport_id, + v_port_feature); + + unf_rport_ref_dec(v_rport); + } + } + + /* Server: R_Port only support INI, + * do not care this case + */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Port(0x%x_0x%x) Rport(0x%x) with feature(0x%x) wait for PRLI", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, v_port_feature); + } + } +} + +static unsigned int unf_low_level_bb_credit(struct unf_lport_s *v_lport) +{ + struct unf_lport_s *lport = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int bb_credit = UNF_LOWLEVEL_BBCREDIT; + + if (unlikely(!v_lport)) + return bb_credit; + + lport = v_lport; + if (unlikely(!lport->low_level_func.port_mgr_op.pfn_ll_port_config_get)) + return bb_credit; + + ret = lport->low_level_func.port_mgr_op.pfn_ll_port_config_get( + (void *)lport->fc_port, + UNF_PORT_CFG_GET_WORKBALE_BBCREDIT, + (void *)&bb_credit); + if (unlikely(ret != RETURN_OK)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[warn]Port(0x%x) get BB_Credit failed, use default value(%d)", + lport->port_id, UNF_LOWLEVEL_BBCREDIT); + + bb_credit = UNF_LOWLEVEL_BBCREDIT; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) with BB_Credit(%u)", + lport->port_id, bb_credit); + + return bb_credit; +} + +unsigned int unf_low_level_bbscn(struct unf_lport_s *v_lport) +{ + struct unf_lport_s *lport = v_lport; + struct unf_low_level_port_mgr_op_s *port_mgr = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int bb_scn = UNF_DEFAULT_BB_SC_N; + + if (unlikely(!v_lport)) + return bb_scn; + + port_mgr = &lport->low_level_func.port_mgr_op; + + if (unlikely(!port_mgr->pfn_ll_port_config_get)) + return bb_scn; + + ret = port_mgr->pfn_ll_port_config_get((void *)lport->fc_port, + UNF_PORT_CFG_GET_WORKBALE_BBSCN, + (void *)&bb_scn); + if (unlikely(ret != RETURN_OK)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[warn]Port(0x%x) get bbscn failed, use default value(%d)", + lport->port_id, UNF_DEFAULT_BB_SC_N); + + bb_scn = UNF_DEFAULT_BB_SC_N; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x)'s bbscn(%d)", + lport->port_id, bb_scn); + + return bb_scn; +} + +static unsigned int unf_els_cmnd_send(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_pkg, + struct unf_xchg_s *v_xchg) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned long time_out = 0; + + UNF_CHECK_VALID(0x3302, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3303, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3304, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + if 
(unlikely(!v_lport->low_level_func.service_op.pfn_unf_els_send)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) ELS send function is NULL", + v_lport->port_id); + + return ret; + } + + /* Add ELS command/response (Exchange) timeout timer */ + time_out = UNF_GET_ELS_SFS_XCHG_TIMER(v_lport); + if (v_xchg->cmnd_code == ELS_RRQ) { + time_out = ((unsigned long) + UNF_GET_ELS_SFS_XCHG_TIMER(v_lport) > + UNF_RRQ_MIN_TIMEOUT_INTERVAL) ? + (unsigned long) + UNF_GET_ELS_SFS_XCHG_TIMER(v_lport) : + UNF_RRQ_MIN_TIMEOUT_INTERVAL; + } else if (v_xchg->cmnd_code == ELS_LOGO) { + time_out = UNF_LOGO_TIMEOUT_INTERVAL; + } + v_lport->xchg_mgr_temp.pfn_unf_xchg_add_timer((void *)v_xchg, + time_out, + UNF_TIMER_TYPE_SFS); + + v_pkg->private[PKG_PRIVATE_XCHG_TIMEER] = + (unsigned int)UNF_GET_ELS_SFS_XCHG_TIMER(v_lport); + v_pkg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] = + v_xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]; + + /* Send ELS command/response */ + ret = v_lport->low_level_func.service_op.pfn_unf_els_send( + v_lport->fc_port, v_pkg); + if (unlikely(ret != RETURN_OK)) { + /* Cancel timer if send failed */ + v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer( + (void *)v_xchg); + } + + return ret; +} + +static unsigned int unf_gs_cmnd_send(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_pkg, + struct unf_xchg_s *v_xchg) +{ + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3305, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3306, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3307, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + if (unlikely(!v_lport->low_level_func.service_op.pfn_unf_gs_send)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) GS send function is NULL", + v_lport->port_id); + + return ret; + } + + /* Add GS command timeout timer */ + v_lport->xchg_mgr_temp.pfn_unf_xchg_add_timer( + (void *)v_xchg, + (unsigned long)UNF_GET_GS_SFS_XCHG_TIMER(v_lport), + UNF_TIMER_TYPE_SFS); + v_pkg->private[PKG_PRIVATE_XCHG_TIMEER] = (unsigned int) + UNF_GET_GS_SFS_XCHG_TIMER(v_lport); + v_pkg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] = + v_xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]; + + /* Send GS command */ + ret = v_lport->low_level_func.service_op.pfn_unf_gs_send( + v_lport->fc_port, v_pkg); + if (unlikely(ret != RETURN_OK)) + /* Cancel timer if send failed */ + v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer( + (void *)v_xchg); + + return ret; +} + +static unsigned int unf_bls_cmnd_send(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_pkg, + struct unf_xchg_s *v_xchg) +{ + UNF_CHECK_VALID(0x3308, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3309, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3310, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + v_pkg->private[PKG_PRIVATE_XCHG_TIMEER] = + (unsigned int)UNF_GET_BLS_SFS_XCHG_TIMER(v_lport); + v_pkg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] = + v_xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]; + + return v_lport->low_level_func.service_op.pfn_unf_bls_send( + v_lport->fc_port, v_pkg); +} + +static void unf_fill_package(struct unf_frame_pkg_s *v_pkg, + struct unf_xchg_s *v_xchg, + struct unf_rport_s *v_rport) +{ + /* v_rport maybe NULL */ + UNF_CHECK_VALID(0x3311, UNF_TRUE, v_pkg, return); + UNF_CHECK_VALID(0x3312, UNF_TRUE, v_xchg, return); + + v_pkg->cmnd = v_xchg->cmnd_code; + v_pkg->fcp_cmnd = &v_xchg->fcp_cmnd; + v_pkg->frame_head.csctl_sid = v_xchg->sid; + v_pkg->frame_head.rctl_did = v_xchg->did; + 
v_pkg->frame_head.oxid_rxid = ((unsigned int)v_xchg->ox_id << 16 | + v_xchg->rx_id); + v_pkg->xchg_contex = v_xchg; + + UNF_CHECK_VALID(0x3313, UNF_TRUE, v_xchg->lport, return); + v_pkg->private[PKG_PRIVATE_XCHG_VP_INDEX] = + v_xchg->lport->vp_index; + + if (!v_rport) { + v_pkg->private[PKG_PRIVATE_XCHG_RPORT_INDEX] = + UNF_RPORT_INVALID_INDEX; + v_pkg->private[PKG_PRIVATE_RPORT_RX_SIZE] = INVALID_VALUE32; + } else { + v_pkg->private[PKG_PRIVATE_XCHG_RPORT_INDEX] = + v_rport->rport_index; + v_pkg->private[PKG_PRIVATE_RPORT_RX_SIZE] = + v_rport->max_frame_size; + } + + v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = v_xchg->hot_pool_tag; + v_pkg->private[PKG_PRIVATE_LOWLEVEL_XCHG_ADD] = + v_xchg->private[PKG_PRIVATE_LOWLEVEL_XCHG_ADD]; + v_pkg->unf_cmnd_pload_bl.buffer_ptr = + (unsigned char *) + v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + v_pkg->unf_cmnd_pload_bl.buf_dma_addr = + v_xchg->fcp_sfs_union.sfs_entry.sfs_buff_phy_addr; + + /* Low level need to know payload length if send ECHO response */ + v_pkg->unf_cmnd_pload_bl.length = + v_xchg->fcp_sfs_union.sfs_entry.cur_offset; +} + +static struct unf_xchg_s *unf_get_sfs_free_xchg_and_init( + struct unf_lport_s *v_lport, + unsigned int v_did, + struct unf_rport_s *v_rport, + union unf_sfs_u **v_fc_entry) +{ + struct unf_xchg_s *xchg = NULL; + union unf_sfs_u *fc_entry = NULL; + + xchg = unf_cm_get_free_xchg(v_lport, UNF_XCHG_TYPE_SFS); + if (!xchg) + return NULL; + + xchg->did = v_did; + xchg->sid = v_lport->nport_id; + xchg->oid = xchg->sid; + xchg->lport = v_lport; + xchg->rport = v_rport; + xchg->disc_rport = NULL; + + if (v_lport->low_level_func.xchg_mgr_type == + UNF_LOW_LEVEL_MGR_TYPE_PASSTIVE) + xchg->ox_id = xchg->hot_pool_tag; + + xchg->pfn_callback = NULL; + xchg->pfn_ob_callback = NULL; + + fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, xchg); + return NULL; + } + + *v_fc_entry = fc_entry; + + return xchg; +} + +static void unf_scr_callback(void *v_lport, + void *v_rport, + void *v_xchg) +{ + /* Callback function for SCR response: Send GID_PT with INI mode */ + struct unf_lport_s *lport = (struct unf_lport_s *)v_lport; + struct unf_disc_s *disc = &lport->disc; + struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_xchg; + struct unf_els_acc_s *els_acc = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned long lport_flag = 0; + unsigned long disc_flag = 0; + unsigned int cmnd = 0; + + UNF_CHECK_VALID(0x3694, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3695, UNF_TRUE, v_xchg, return); + UNF_REFERNCE_VAR(v_rport); + UNF_REFERNCE_VAR(ret); + + if (!xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) + return; + + els_acc = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->els_acc; + if (xchg->byte_orders & UNF_BIT_2) + cmnd = be32_to_cpu(els_acc->cmnd); + else + cmnd = (els_acc->cmnd); + + if ((cmnd & UNF_ELS_CMND_HIGH_MASK) == UNF_ELS_CMND_ACC) { + /* About ELS_CMND ACC */ + spin_lock_irqsave(&lport->lport_state_lock, lport_flag); + + /* Check L_Port state: SCR_WAIT */ + if (lport->en_states != UNF_LPORT_ST_SCR_WAIT) { + spin_unlock_irqrestore(&lport->lport_state_lock, + lport_flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x_0x%x) receive SCR ACC with error state(0x%x)", + lport->port_id, lport->nport_id, + lport->en_states); + return; + } + + /* Update 
L_Port state machine: Ready */ + /* LPort: SCR_WAIT --> READY */ + unf_lport_stat_ma(lport, UNF_EVENT_LPORT_REMOTE_ACC); + if (lport->en_states == UNF_LPORT_ST_READY) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]LOGIN: Port(0x%x_0x%x) enter READY state when received SCR response", + lport->port_id, lport->nport_id); + } + + /* Start to Discovery with INI mode: GID_PT */ + if ((lport->options & UNF_PORT_MODE_INI) == + UNF_PORT_MODE_INI) { + spin_unlock_irqrestore(&lport->lport_state_lock, + lport_flag); + + if (lport->disc.unf_disc_temp.pfn_unf_disc_start) { + spin_lock_irqsave(&disc->rport_busy_pool_lock, + disc_flag); + lport->disc.disc_option = UNF_INIT_DISC; + disc->last_disc_jiff = jiffies; + spin_unlock_irqrestore( + &disc->rport_busy_pool_lock, disc_flag); + + ret = lport->disc.unf_disc_temp.pfn_unf_disc_start(lport); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]LOGIN: Port(0x%x) DISC %s with INI mode", + lport->port_id, + (ret != RETURN_OK) ? "failed" : + "succeed"); + } + + UNF_REFERNCE_VAR(ret); + return; + } + /* TGT mode: Do not care */ + spin_unlock_irqrestore(&lport->lport_state_lock, lport_flag); + + /* NOTE: set state with UNF_DISC_ST_END used for RSCN process */ + spin_lock_irqsave(&disc->rport_busy_pool_lock, disc_flag); + lport->disc.en_states = UNF_DISC_ST_END; + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) is TGT mode, no need to discovery", + lport->port_id); + return; + } + /* About ELS_CMND response: RJT */ + unf_lport_error_recovery(lport); + + UNF_REFERNCE_VAR(ret); +} + +static void unf_scr_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Callback fucnion for exception: Do L_Port error recovery */ + struct unf_lport_s *lport = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3692, UNF_TRUE, v_xchg, return); + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + lport = v_xchg->lport; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + UNF_CHECK_VALID(0x3693, UNF_TRUE, lport, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) send SCR failed and do port recovery", + lport->port_id); + + unf_lport_error_recovery(lport); +} + +unsigned int unf_send_scr(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* after RCVD RFF_ID ACC */ + struct unf_scr_s *scr = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned short ox_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3314, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3315, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + /* Get free exchange for SCR */ + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + NULL, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for SCR", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = ELS_SCR; /* SCR */ + ox_id = xchg->ox_id; + /* Set callback function */ + xchg->pfn_callback = unf_scr_callback; + xchg->pfn_ob_callback = unf_scr_ob_callback; + + /* Fill command/response package */ + unf_fill_package(&pkg, xchg, v_rport); + + scr = &fc_entry->scr; + memset(scr, 0, sizeof(struct unf_scr_s)); + scr->payload[0] = (UNF_GS_CMND_SCR); /* SCR is 0x62 */ + scr->payload[1] = (UNF_FABRIC_FULL_REG); /* Full 
registration */ + + /* Send SCR command */ + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: SCR send %s. Port(0x%x_0x%x)--->rport(0x%x) with OX_ID(0x%x)", + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_rec_pld(struct unf_rec_pld_s *v_rec_pld, + unsigned int v_sid, + unsigned short v_oxid) +{ + UNF_CHECK_VALID(0x3339, UNF_TRUE, v_rec_pld, return); + + v_rec_pld->rec_cmnd = UNF_ELS_CMND_REC; + v_rec_pld->xchg_org_sid = v_sid; + v_rec_pld->ox_id = v_oxid; + v_rec_pld->rx_id = INVALID_VALUE16; +} + +unsigned int unf_send_rec(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_io_xchg) +{ + struct unf_rec_pld_s *rec_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + struct unf_frame_pkg_s pkg; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3324, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3325, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3325, UNF_TRUE, v_io_xchg, return UNF_RETURN_ERROR); + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + + /* Get & Set new free exchange */ + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for PLOGI", + v_lport->port_id); + return ret; + } + + xchg->cmnd_code = ELS_REC; + ox_id = xchg->ox_id; + unf_fill_package(&pkg, xchg, v_rport); + + rec_pld = &fc_entry->rec.rec_pld; + memset(rec_pld, 0, sizeof(struct unf_rec_pld_s)); + + unf_fill_rec_pld(rec_pld, v_lport->nport_id, v_io_xchg->ox_id); + + /* Start to Send REC command */ + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[info]LOGIN: Send REC %s. Port(0x%x_0x%x_0x%llx)--->RPort(0x%x_0x%llx) with OX_ID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", + v_lport->port_id, v_lport->nport_id, v_lport->port_name, + v_rport->nport_id, v_rport->port_name, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_flogi_pld(struct unf_flogi_payload_s *v_flogi_pld, + struct unf_lport_s *v_lport) +{ + struct unf_fabric_parms_s *fabric_parms = NULL; + + UNF_CHECK_VALID(0x3316, UNF_TRUE, v_flogi_pld, return); + UNF_CHECK_VALID(0x3317, UNF_TRUE, v_lport, return); + + fabric_parms = &v_flogi_pld->fabric_parms; + if ((v_lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC) || + (v_lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT) || + (v_lport->en_act_topo == UNF_TOP_P2P_MASK)) { + /* Fabric or P2P topology */ + fabric_parms->co_parms.bb_credit = + unf_low_level_bb_credit(v_lport); + fabric_parms->co_parms.lowest_version = + UNF_PLOGI_VERSION_LOWER; + fabric_parms->co_parms.highest_version = + UNF_PLOGI_VERSION_UPPER; + fabric_parms->co_parms.bb_receive_data_field_size = + (v_lport->max_frame_size); + fabric_parms->co_parms.bb_scn = unf_low_level_bbscn(v_lport); + } else { + /* Loop topology here */ + fabric_parms->co_parms.clean_address = + UNF_CLEAN_ADDRESS_DEFAULT; + fabric_parms->co_parms.bb_credit = UNF_BBCREDIT_LPORT; + fabric_parms->co_parms.lowest_version = + UNF_PLOGI_VERSION_LOWER; + fabric_parms->co_parms.highest_version = + UNF_PLOGI_VERSION_UPPER; + fabric_parms->co_parms.alternate_bb_credit_mgmt = + UNF_BBCREDIT_MANAGE_LPORT; /* :1 */ + fabric_parms->co_parms.bb_receive_data_field_size = + (v_lport->max_frame_size); + } + + if (v_lport->low_level_func.support_max_npiv_num != 0) + fabric_parms->co_parms.clean_address = 1; /* support NPIV */ + + fabric_parms->cl_parms[2].valid = UNF_CLASS_VALID; + fabric_parms->cl_parms[2].priority = UNF_PRIORITY_DISABLE; + + fabric_parms->cl_parms[2].sequential_delivery = + UNF_SEQUEN_DELIVERY_REQ; + fabric_parms->cl_parms[2].received_data_field_size = + (v_lport->max_frame_size); + + fabric_parms->high_node_name = + UNF_GET_NAME_HIGH_WORD(v_lport->node_name); + fabric_parms->low_node_name = + UNF_GET_NAME_LOW_WORD(v_lport->node_name); + fabric_parms->high_port_name = + UNF_GET_NAME_HIGH_WORD(v_lport->port_name); + fabric_parms->low_port_name = + UNF_GET_NAME_LOW_WORD(v_lport->port_name); +} + +static void unf_flogi_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Send FLOGI failed & Do L_Port recovery */ + struct unf_lport_s *lport = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3644, UNF_TRUE, v_xchg, return); + + /* Get L_port from exchange context */ + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + lport = v_xchg->lport; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + UNF_CHECK_VALID(0x3645, UNF_TRUE, lport, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) send FLOGI failed", + lport->port_id); + + /* Check L_Port state */ + spin_lock_irqsave(&lport->lport_state_lock, flag); + if (lport->en_states != UNF_LPORT_ST_FLOGI_WAIT) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) send FLOGI failed with state(0x%x)", + lport->port_id, lport->nport_id, lport->en_states); + + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + return; + } + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + /* Do L_Port error recovery */ + unf_lport_error_recovery(lport); +} + +unsigned int unf_send_flogi(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + struct unf_xchg_s *xchg = NULL; + struct unf_flogi_payload_s *flogi_pld = NULL; + union 
unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned short ox_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3318, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3319, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + /* Get & Set New free Exchange Context */ + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for FLOGI", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = ELS_FLOGI; /* FLOGI */ + + ox_id = xchg->ox_id; + + /* Set callback function */ + /* for rcvd flogi acc/rjt processer */ + xchg->pfn_callback = unf_flogi_callback; + /* for send flogi failed processer */ + xchg->pfn_ob_callback = unf_flogi_ob_callback; + + /* Fill package: Exchange --to-->> Package */ + unf_fill_package(&pkg, xchg, v_rport); + + /* Fill Flogi Payload */ + flogi_pld = &fc_entry->flogi.flogi_payload; + memset(flogi_pld, 0, sizeof(struct unf_flogi_payload_s)); + unf_fill_flogi_pld(flogi_pld, v_lport); + flogi_pld->cmnd = (UNF_ELS_CMND_FLOGI); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Begin to send FLOGI. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x)", + v_lport->port_id, v_rport->nport_id, ox_id); + + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, flogi_pld, + sizeof(struct unf_flogi_payload_s)); + + /* Start to send FLOGI command */ + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[warn]LOGIN: Send FLOGI failed. Port(0x%x)--->rport(0x%x)", + v_lport->port_id, v_rport->nport_id); + + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + } + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fdisc_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Do recovery */ + struct unf_lport_s *lport = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3638, UNF_TRUE, v_xchg, return); + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + lport = v_xchg->lport; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: FDISC send failed"); + + UNF_CHECK_VALID(0x3639, UNF_TRUE, NULL != lport, return); + + /* Do L_Port error recovery */ + unf_lport_error_recovery(lport); +} + +unsigned int unf_send_fdisc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + struct unf_xchg_s *exch = NULL; + struct unf_flogi_payload_s *fdisc_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned short ox_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3320, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3321, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + exch = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!exch) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for FDISC", + v_lport->port_id); + + return ret; + } + + exch->cmnd_code = ELS_FDISC; /* FDISC */ + + ox_id = exch->ox_id; + + /* Set callback function */ + exch->pfn_callback = unf_fdisc_callback; + exch->pfn_ob_callback = unf_fdisc_ob_callback; + + unf_fill_package(&pkg, exch, v_rport); + + /* Fill FDISC entry(payload) */ + fdisc_pld = &fc_entry->fdisc.fdisc_payload; + memset(fdisc_pld, 
0, sizeof(struct unf_flogi_payload_s)); + unf_fill_flogi_pld(fdisc_pld, v_lport); + fdisc_pld->cmnd = UNF_ELS_CMND_FDISC; /* update cmnd type */ + + /* Start to send FDISC */ + ret = unf_els_cmnd_send(v_lport, &pkg, exch); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)exch); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: FDISC send %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x)", + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_plogi_pld(struct unf_plogi_payload_s *v_plogi_pld, + struct unf_lport_s *v_lport) +{ + struct unf_lgn_parms_s *login_parms = NULL; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x3322, UNF_TRUE, v_plogi_pld, return); + UNF_CHECK_VALID(0x3323, UNF_TRUE, v_lport, return); + + lport = v_lport->root_lport; + v_plogi_pld->cmnd = (UNF_ELS_CMND_PLOGI); + login_parms = &v_plogi_pld->parms; + + if ((v_lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC) || + (v_lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT)) { + /* P2P or Fabric mode */ + login_parms->co_parms.bb_credit = + (unf_low_level_bb_credit(v_lport)); + login_parms->co_parms.alternate_bb_credit_mgmt = + UNF_BBCREDIT_MANAGE_NFPORT; /* 0 */ + login_parms->co_parms.bb_scn = + (v_lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC) ? + 0 : unf_low_level_bbscn(v_lport); + } else { + /* Public loop & Private loop mode */ + login_parms->co_parms.bb_credit = UNF_BBCREDIT_LPORT; /* 0 */ + login_parms->co_parms.alternate_bb_credit_mgmt = + UNF_BBCREDIT_MANAGE_LPORT; /* 1 */ + } + + login_parms->co_parms.lowest_version = UNF_PLOGI_VERSION_LOWER; + login_parms->co_parms.highest_version = UNF_PLOGI_VERSION_UPPER; + login_parms->co_parms.continuously_increasing = + UNF_CONTIN_INCREASE_SUPPORT; + login_parms->co_parms.bb_receive_data_field_size = + (v_lport->max_frame_size); + login_parms->co_parms.nport_total_concurrent_sequences = + (UNF_PLOGI_CONCURRENT_SEQ); + login_parms->co_parms.relative_offset = (UNF_PLOGI_RO_CATEGORY); + login_parms->co_parms.e_d_tov = UNF_DEFAULT_EDTOV; + if (lport->b_priority == UNF_PRIORITY_ENABLE) + login_parms->cl_parms[2].priority = UNF_PRIORITY_ENABLE; + else + login_parms->cl_parms[2].priority = UNF_PRIORITY_DISABLE; + + login_parms->cl_parms[2].valid = UNF_CLASS_VALID; /* for class_3 */ + login_parms->cl_parms[2].received_data_field_size = + (v_lport->max_frame_size); + login_parms->cl_parms[2].concurrent_sequences = + (UNF_PLOGI_CONCURRENT_SEQ); + login_parms->cl_parms[2].open_sequences_per_exchange = + (UNF_PLOGI_SEQ_PER_XCHG); + + login_parms->high_node_name = + UNF_GET_NAME_HIGH_WORD(v_lport->node_name); + login_parms->low_node_name = + UNF_GET_NAME_LOW_WORD(v_lport->node_name); + login_parms->high_port_name = + UNF_GET_NAME_HIGH_WORD(v_lport->port_name); + login_parms->low_port_name = + UNF_GET_NAME_LOW_WORD(v_lport->port_name); + + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, v_plogi_pld, + sizeof(struct unf_plogi_payload_s)); +} + +static void unf_plogi_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Do L_Port or R_Port recovery */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3656, UNF_TRUE, v_xchg, return); + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + lport = v_xchg->lport; + rport = v_xchg->rport; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + UNF_CHECK_VALID(0x3657, UNF_TRUE, lport, return); + UNF_CHECK_VALID(0x3734, 
UNF_TRUE, rport, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) send PLOGI(0x%x_0x%x) to RPort(%p:0x%x_0x%x) failed", + lport->port_id, lport->nport_id, v_xchg->ox_id, + v_xchg->rx_id, rport, rport->rport_index, rport->nport_id); + + /* Start recovery */ + if (rport->nport_id > UNF_FC_FID_DOM_MGR) { + /* with Name server: R_Port is fabric --->>> + * L_Port error recovery + */ + unf_lport_error_recovery(lport); + } else { + /* R_Port is not fabric --->>> R_Port error recovery */ + unf_rport_error_recovery(rport); + } +} + +unsigned int unf_send_plogi(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + struct unf_plogi_payload_s *plogi_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + struct unf_frame_pkg_s pkg; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3324, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3325, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + + /* Get & Set new free exchange */ + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for PLOGI", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = ELS_PLOGI; /* PLOGI */ + + ox_id = xchg->ox_id; + + /* Set callback function */ + /* for rcvd plogi acc/rjt processing */ + xchg->pfn_callback = unf_plogi_callback; + /* for send plogi failed processing */ + xchg->pfn_ob_callback = unf_plogi_ob_callback; + + unf_fill_package(&pkg, xchg, v_rport); + + /* Fill PLOGI payload */ + plogi_pld = &fc_entry->plogi.payload; + memset(plogi_pld, 0, sizeof(struct unf_plogi_payload_s)); + unf_fill_plogi_pld(plogi_pld, v_lport); + + /* Start to Send PLOGI command */ + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Send PLOGI %s. Port(0x%x_0x%x_0x%llx)--->rport(0x%x_0x%llx) with OX_ID(0x%x)", + (ret != RETURN_OK) ?
"failed" : "succeed", + v_lport->port_id, v_lport->nport_id, v_lport->port_name, + v_rport->nport_id, v_rport->port_name, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_logo_pld(struct unf_logo_payload_s *v_logo_pld, + struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x3326, UNF_TRUE, v_logo_pld, return); + UNF_CHECK_VALID(0x3327, UNF_TRUE, v_lport, return); + + v_logo_pld->cmnd = UNF_ELS_CMND_LOGO; + v_logo_pld->nport_id = (v_lport->nport_id); + v_logo_pld->high_port_name = + UNF_GET_NAME_HIGH_WORD(v_lport->port_name); + v_logo_pld->low_port_name = + UNF_GET_NAME_LOW_WORD(v_lport->port_name); + + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, + v_logo_pld, + sizeof(struct unf_logo_payload_s)); +} + +static void unf_logo_ob_callback(struct unf_xchg_s *v_xchg) +{ + struct unf_lport_s *lport; + struct unf_rport_s *rport; + struct unf_rport_s *old_rport; + struct unf_xchg_s *xchg; + unsigned int nport_id = 0; + unsigned int logo_retry = 0; + + UNF_CHECK_VALID(0x3675, UNF_TRUE, v_xchg, return); + xchg = v_xchg; + old_rport = xchg->rport; + logo_retry = old_rport->logo_retries; + + if (old_rport->nport_id != INVALID_VALUE32) + unf_rport_enter_closing(old_rport); + + lport = xchg->lport; + if (unf_is_lport_valid(lport) != RETURN_OK) + return; + + /* Get R_Port by exchange info: Init state */ + nport_id = xchg->did; + rport = unf_get_rport_by_nport_id(lport, nport_id); + rport = unf_get_safe_rport(lport, rport, UNF_RPORT_REUSE_INIT, + nport_id); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) cannot allocate RPort", + lport->port_id); + return; + } + + rport->logo_retries = logo_retry; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[info]LOGIN: Port(0x%x) received LOGO RSP timeout topo(0x%x) retries(%u)", + lport->port_id, lport->en_act_topo, rport->logo_retries); + + /* RCVD LOGO/PRLO & SEND LOGO: the same process */ + if (rport->logo_retries < UNF_MAX_RETRY_COUNT) { + /* <: retry (LOGIN or LOGO) if necessary */ + unf_process_rport_after_logo(lport, rport); + } else { + /* >=: Link down */ + unf_rport_immediate_linkdown(lport, rport); + } +} + +static void unf_logo_callback(void *v_lport, void *v_rport, void *v_xchg) +{ + /* RCVD LOGO ACC/RJT: retry(LOGIN/LOGO) or link down immediately */ + struct unf_lport_s *lport = (struct unf_lport_s *)v_lport; + struct unf_rport_s *rport = NULL; + struct unf_rport_s *old_rport = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_els_rjt_s *els_acc_rjt = NULL; + unsigned int cmnd = 0; + unsigned int nport_id = 0; + unsigned int logo_retry = 0; + + UNF_CHECK_VALID(0x3675, UNF_TRUE, v_xchg, return); + UNF_REFERNCE_VAR(v_rport); + + xchg = (struct unf_xchg_s *)v_xchg; + old_rport = xchg->rport; + + logo_retry = old_rport->logo_retries; + if (old_rport->nport_id != INVALID_VALUE32) + unf_rport_enter_closing(old_rport); + + if (unf_is_lport_valid(v_lport) != RETURN_OK) + return; + + if (!xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) + return; + + /* Get R_Port by exchange info: Init state */ + els_acc_rjt = + &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->els_rjt; + nport_id = xchg->did; + rport = unf_get_rport_by_nport_id(lport, nport_id); + rport = unf_get_safe_rport(lport, rport, + UNF_RPORT_REUSE_INIT, nport_id); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) cannot allocate RPort", + lport->port_id); + return; + } + + rport->logo_retries = logo_retry; + cmnd = be32_to_cpu(els_acc_rjt->cmnd); 
+ UNF_REFERNCE_VAR(cmnd); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Port(0x%x) received LOGO RSP(0x%x), topo(0x%x) Port options(0x%x) RPort options(0x%x) retries(%d)", + lport->port_id, (cmnd & UNF_ELS_CMND_HIGH_MASK), + lport->en_act_topo, + lport->options, rport->options, rport->logo_retries); + + /* RCVD LOGO/PRLO & SEND LOGO: the same process */ + if (rport->logo_retries < UNF_MAX_RETRY_COUNT) + /* <: retry (LOGIN or LOGO) if necessary */ + unf_process_rport_after_logo(lport, rport); + else + /* >=: Link down */ + unf_rport_immediate_linkdown(lport, rport); +} + +unsigned int unf_send_logo(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + struct unf_logo_payload_s *logo_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3328, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for LOGO", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = ELS_LOGO; /* LOGO */ + + ox_id = xchg->ox_id; + + /* Set callback function */ + /* retry or link down immediately */ + xchg->pfn_callback = unf_logo_callback; + xchg->pfn_ob_callback = unf_logo_ob_callback; /* do nothing */ + + unf_fill_package(&pkg, xchg, v_rport); + + /* Fill LOGO entry(payload) */ + logo_pld = &fc_entry->logo.payload; + memset(logo_pld, 0, sizeof(struct unf_logo_payload_s)); + unf_fill_logo_pld(logo_pld, v_lport); + + /* Start to send LOGO command */ + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + v_rport->logo_retries++; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[info]LOGIN: LOGO send %s. Port(0x%x)--->rport(0x%x) OXID(0x%x) Retries(%d)", + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_rport->nport_id, + ox_id, v_rport->logo_retries); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +unsigned int unf_send_logo_by_did(struct unf_lport_s *v_lport, + unsigned int v_did) +{ + /* Has non R_Port */ + struct unf_logo_payload_s *logo_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3329, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_did, NULL, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for LOGO", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = ELS_LOGO; /* LOGO */ + + ox_id = xchg->ox_id; + + unf_fill_package(&pkg, xchg, NULL); + + /* Fill LOGO entry(payload) */ + logo_pld = &fc_entry->logo.payload; + memset(logo_pld, 0, sizeof(struct unf_logo_payload_s)); + unf_fill_logo_pld(logo_pld, v_lport); + + /* Start to send LOGO now */ + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: LOGO send %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", + v_lport->port_id, v_did, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void *unf_get_one_big_sfs_buf(struct unf_xchg_s *v_xchg) +{ + struct unf_big_sfs_s *big_sfs = NULL; + struct list_head *list_head = NULL; + struct unf_xchg_mgr_s *xchg_mgr = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3330, UNF_TRUE, v_xchg, return NULL); + xchg_mgr = v_xchg->xchg_mgr; + UNF_CHECK_VALID(0x3331, UNF_TRUE, xchg_mgr, return NULL); + + spin_lock_irqsave(&xchg_mgr->st_big_sfs_pool.big_sfs_pool_lock, flag); + if (!list_empty(&xchg_mgr->st_big_sfs_pool.list_free_pool)) { + /* from free to busy */ + list_head = (&xchg_mgr->st_big_sfs_pool.list_free_pool)->next; + list_del(list_head); + xchg_mgr->st_big_sfs_pool.free_count--; + list_add_tail(list_head, + &xchg_mgr->st_big_sfs_pool.list_busy_pool); + big_sfs = list_entry(list_head, struct unf_big_sfs_s, + entry_big_sfs); + } else { + spin_unlock_irqrestore( + &xchg_mgr->st_big_sfs_pool.big_sfs_pool_lock, + flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Allocate big sfs buf failed, count(0x%x) exchange(0x%p) command(0x%x)", + xchg_mgr->st_big_sfs_pool.free_count, + v_xchg, v_xchg->cmnd_code); + + return NULL; + } + spin_unlock_irqrestore(&xchg_mgr->st_big_sfs_pool.big_sfs_pool_lock, + flag); + + v_xchg->big_sfs_buf = big_sfs; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Allocate one address(0x%p) of big sfs buffer, remaining count(0x%x) exchange(0x%p) command(0x%x)", + big_sfs->vaddr, + xchg_mgr->st_big_sfs_pool.free_count, + v_xchg, + v_xchg->cmnd_code); + + return big_sfs->vaddr; +} + +static void unf_echo_callback(void *v_lport, void *v_rport, void *v_xchg) +{ + struct unf_lport_s *lport = (struct unf_lport_s *)v_lport; + struct unf_rport_s *rport = (struct unf_rport_s *)v_rport; + struct unf_xchg_s *xchg = NULL; + struct unf_echo_payload_s *echo_rsp_pld = NULL; + unsigned int cmnd = 0; + unsigned int mag_ver_local = 0; + unsigned int mag_ver_remote = 0; + + UNF_CHECK_VALID(0x3332, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3333, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3334, UNF_TRUE, v_xchg, return); + UNF_REFERNCE_VAR(lport); + UNF_REFERNCE_VAR(rport); + + xchg = (struct unf_xchg_s *)v_xchg; + if (!xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) + return; + + echo_rsp_pld = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo_acc.echo_pld; + UNF_CHECK_VALID(0x3335, UNF_TRUE, NULL != echo_rsp_pld, return); + + if (xchg->byte_orders & UNF_BIT_2) { + unf_big_end_to_cpu((unsigned char *)echo_rsp_pld, + sizeof(struct unf_echo_payload_s)); + cmnd = echo_rsp_pld->cmnd; + } else { + cmnd = echo_rsp_pld->cmnd; + } + + mag_ver_local = echo_rsp_pld->data[0]; + mag_ver_remote = echo_rsp_pld->data[1]; + + /* Print info */ + if ((cmnd & UNF_ELS_CMND_HIGH_MASK) == UNF_ELS_CMND_ACC) { + if ((mag_ver_local == ECHO_MG_VERSION_LOCAL) && + (mag_ver_remote == ECHO_MG_VERSION_REMOTE)) { + /* both side are 1822 */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "LPort(0x%x) send ECHO to RPort(0x%x), received ACC. 
local snd echo:(0x%x), remote rcv echo:(0x%x), remote snd echo acc:(0x%x), local rcv echo acc:(0x%x)", + lport->port_id, rport->nport_id, + xchg->private[PKG_PRIVATE_ECHO_CMD_SND_TIME], + xchg->private[PKG_PRIVATE_ECHO_CMD_RCV_TIME], + xchg->private[PKG_PRIVATE_ECHO_RSP_SND_TIME], + xchg->private[PKG_PRIVATE_ECHO_ACC_RCV_TIME]); + } else if ((mag_ver_local == ECHO_MG_VERSION_LOCAL) && + (mag_ver_remote != ECHO_MG_VERSION_REMOTE)) { + /* the peer don't supprt smartping, only local snd + * and rcv rsp time stamp + */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "LPort(0x%x) send ECHO to RPort(0x%x), received ACC. local snd echo:(0x%x), local rcv echo acc:(0x%x)", + lport->port_id, rport->nport_id, + xchg->private[PKG_PRIVATE_ECHO_CMD_SND_TIME], + xchg->private[PKG_PRIVATE_ECHO_ACC_RCV_TIME]); + } else if ((mag_ver_local != ECHO_MG_VERSION_LOCAL) && + (mag_ver_remote != ECHO_MG_VERSION_REMOTE)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "LPort(0x%x) send ECHO to RPort(0x%x), received ACC. local and remote is not IN300", + lport->port_id, rport->nport_id); + } + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) send ECHO to RPort(0x%x) and received RJT", + lport->port_id, rport->nport_id); + } + + xchg->echo_info.echo_result = UNF_ELS_ECHO_RESULT_OK; + xchg->echo_info.response_time = jiffies - + xchg->echo_info.response_time; + + /* wake up semaphore */ + up(&xchg->echo_info.echo_sync_sema); +} + +static void unf_echo_ob_callback(struct unf_xchg_s *v_xchg) +{ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + + UNF_CHECK_VALID(0x3336, UNF_TRUE, v_xchg, return); + lport = v_xchg->lport; + UNF_CHECK_VALID(0x3337, UNF_TRUE, lport, return); + rport = v_xchg->rport; + UNF_CHECK_VALID(0x3338, UNF_TRUE, rport, return); + + UNF_REFERNCE_VAR(lport); + UNF_REFERNCE_VAR(rport); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) send ECHO to RPort(0x%x) but timeout", + lport->port_id, rport->nport_id); + + v_xchg->echo_info.echo_result = UNF_ELS_ECHO_RESULT_FAIL; + + /* wake up semaphore */ + up(&v_xchg->echo_info.echo_sync_sema); +} + +unsigned int unf_send_echo(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int *v_time) +{ + struct unf_echo_payload_s *echo_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned int ret = UNF_RETURN_ERROR; + unsigned long delay = 0; + unsigned short ox_id = 0; + dma_addr_t phy_echo_addr; + + UNF_CHECK_VALID(0x3340, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3341, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3342, UNF_TRUE, v_time, return UNF_RETURN_ERROR); + + delay = 2 * (unsigned long)(v_lport->ra_tov); + + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for ECHO", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = ELS_ECHO; /* ECHO */ + + xchg->fcp_sfs_union.sfs_entry.cur_offset = UNF_ECHO_REQ_SIZE; + + ox_id = xchg->ox_id; + + /* Set callback function */ + xchg->pfn_callback = unf_echo_callback; /* wake up semaphore */ + xchg->pfn_ob_callback = unf_echo_ob_callback; /* wake up semaphore */ + + unf_fill_package(&pkg, xchg, v_rport); + + /* Fill ECHO entry(payload) */ + echo_pld = (struct unf_echo_payload_s 
*)unf_get_one_big_sfs_buf(xchg); + if (!echo_pld) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) can't allocate buffer for ECHO", + v_lport->port_id); + + unf_cm_free_xchg(v_lport, xchg); + return UNF_RETURN_ERROR; + } + + fc_entry->echo.echo_pld = echo_pld; + phy_echo_addr = pci_map_single(v_lport->low_level_func.dev, echo_pld, + UNF_ECHO_PAYLOAD_LEN, DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error( + v_lport->low_level_func.dev, phy_echo_addr)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) pci map err", + v_lport->port_id); + unf_cm_free_xchg(v_lport, xchg); + return UNF_RETURN_ERROR; + } + fc_entry->echo.phy_echo_addr = phy_echo_addr; + memset(echo_pld, 0, sizeof(struct unf_echo_payload_s)); + echo_pld->cmnd = (UNF_ELS_CMND_ECHO); + echo_pld->data[0] = ECHO_MG_VERSION_LOCAL; + + ret = unf_xchg_ref_inc(xchg, SEND_ELS); + UNF_CHECK_VALID(0x3343, UNF_TRUE, (ret == RETURN_OK), + return UNF_RETURN_ERROR); + + /* Start to send ECHO command */ + xchg->echo_info.response_time = jiffies; + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) { + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + } else { + if (down_timeout(&xchg->echo_info.echo_sync_sema, + (long) + msecs_to_jiffies((unsigned int)delay))) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]ECHO send %s. Port(0x%x)--->rport(0x%x) but response timeout with OX_ID(0x%x)", + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id); + + xchg->echo_info.echo_result = + UNF_ELS_ECHO_RESULT_FAIL; + } + + if (xchg->echo_info.echo_result == + UNF_ELS_ECHO_RESULT_FAIL) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "Echo send fail or timeout"); + + ret = UNF_RETURN_ERROR; + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "echo acc rsp,echo_cmd_snd(0x%xus)-->echo_cmd_rcv(0x%xus)-->echo_acc_snd(0x%xus)-->echo_acc_rcv(0x%xus).", + xchg->private[PKG_PRIVATE_ECHO_CMD_SND_TIME], + xchg->private[PKG_PRIVATE_ECHO_CMD_RCV_TIME], + xchg->private[PKG_PRIVATE_ECHO_RSP_SND_TIME], + xchg->private[PKG_PRIVATE_ECHO_ACC_RCV_TIME]); + + *v_time = ( + xchg->private[PKG_PRIVATE_ECHO_ACC_RCV_TIME] - + xchg->private[PKG_PRIVATE_ECHO_CMD_SND_TIME]) - + (xchg->private[PKG_PRIVATE_ECHO_RSP_SND_TIME] - + xchg->private[PKG_PRIVATE_ECHO_CMD_RCV_TIME]); + } + } + + pci_unmap_single(v_lport->low_level_func.dev, phy_echo_addr, + UNF_ECHO_PAYLOAD_LEN, DMA_BIDIRECTIONAL); + fc_entry->echo.phy_echo_addr = 0; + unf_xchg_ref_dec(xchg, SEND_ELS); + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_prli_pld(struct unf_pril_payload_s *v_prli_pld, + struct unf_lport_s *v_lport) +{ + unsigned int pld_len = 0; + + UNF_CHECK_VALID(0x3344, UNF_TRUE, v_prli_pld, return); + UNF_CHECK_VALID(0x3345, UNF_TRUE, v_lport, return); + + pld_len = sizeof(struct unf_pril_payload_s) - UNF_PRLI_SIRT_EXTRA_SIZE; + v_prli_pld->cmnd = (UNF_ELS_CMND_PRLI | + ((unsigned int)UNF_FC4_FRAME_PAGE_SIZE << + UNF_FC4_FRAME_PAGE_SIZE_SHIFT) | + ((unsigned int)pld_len)); + + v_prli_pld->parms[0] = (UNF_FC4_FRAME_PARM_0_FCP | + UNF_FC4_FRAME_PARM_0_I_PAIR); + v_prli_pld->parms[1] = UNF_NOT_MEANINGFUL; + v_prli_pld->parms[2] = UNF_NOT_MEANINGFUL; + + /* About Read Xfer_rdy disable */ + v_prli_pld->parms[3] = (UNF_FC4_FRAME_PARM_3_R_XFER_DIS | + v_lport->options); + + /* About FCP confirm */ + if (v_lport->low_level_func.lport_cfg_items.fcp_conf == UNF_TRUE) + v_prli_pld->parms[3] |= 
UNF_FC4_FRAME_PARM_3_CONF_ALLOW; + + /* About Tape support */ + if (v_lport->low_level_func.lport_cfg_items.tape_support) { + v_prli_pld->parms[3] |= + (UNF_FC4_FRAME_PARM_3_REC_SUPPORT | + UNF_FC4_FRAME_PARM_3_RETRY_SUPPORT | + UNF_FC4_FRAME_PARM_3_TASK_RETRY_ID_SUPPORT | + UNF_FC4_FRAME_PARM_3_CONF_ALLOW); + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x)'s PRLI payload: options(0x%x) parameter-3(0x%x)", + v_lport->port_id, v_lport->options, v_prli_pld->parms[3]); + + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, v_prli_pld, + sizeof(struct unf_pril_payload_s)); +} + +static void unf_prli_callback(void *v_lport, void *v_rport, void *v_xchg) +{ + /* RCVD PRLI RSP: ACC or RJT --->>> SCSI Link Up */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_pril_payload_s *prli_acc_pld = NULL; + unsigned long flag = 0; + unsigned int cmnd = 0; + unsigned int options = 0; + unsigned int fcp_conf = 0; + unsigned int rec_support = 0; + unsigned int task_retry_support = 0; + unsigned int retry_support = 0; + unsigned int tape_support = 0; + enum unf_rport_login_state_e rport_state = UNF_RPORT_ST_INIT; + + UNF_CHECK_VALID(0x3679, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3680, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3681, UNF_TRUE, v_xchg, return); + lport = (struct unf_lport_s *)v_lport; + rport = (struct unf_rport_s *)v_rport; + xchg = (struct unf_xchg_s *)v_xchg; + + if (!xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange(%p) entry is NULL", + lport->port_id, xchg); + return; + } + + /* Get PRLI ACC payload */ + prli_acc_pld = + &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->prli_acc.payload; + if (xchg->byte_orders & UNF_BIT_2) { + /* Change to little End, About INI/TGT mode & confirm info */ + options = be32_to_cpu(prli_acc_pld->parms[3]) & + (UNF_FC4_FRAME_PARM_3_TGT | + UNF_FC4_FRAME_PARM_3_INI); + + cmnd = be32_to_cpu(prli_acc_pld->cmnd); + fcp_conf = be32_to_cpu(prli_acc_pld->parms[3]) & + UNF_FC4_FRAME_PARM_3_CONF_ALLOW; + rec_support = be32_to_cpu(prli_acc_pld->parms[3]) & + UNF_FC4_FRAME_PARM_3_REC_SUPPORT; + task_retry_support = be32_to_cpu(prli_acc_pld->parms[3]) & + UNF_FC4_FRAME_PARM_3_TASK_RETRY_ID_SUPPORT; + retry_support = be32_to_cpu(prli_acc_pld->parms[3]) & + UNF_FC4_FRAME_PARM_3_RETRY_SUPPORT; + + } else { + options = (prli_acc_pld->parms[3]) & + (UNF_FC4_FRAME_PARM_3_TGT | + UNF_FC4_FRAME_PARM_3_INI); + + cmnd = (prli_acc_pld->cmnd); + fcp_conf = prli_acc_pld->parms[3] & + UNF_FC4_FRAME_PARM_3_CONF_ALLOW; + rec_support = prli_acc_pld->parms[3] & + UNF_FC4_FRAME_PARM_3_REC_SUPPORT; + task_retry_support = prli_acc_pld->parms[3] & + UNF_FC4_FRAME_PARM_3_TASK_RETRY_ID_SUPPORT; + retry_support = prli_acc_pld->parms[3] & + UNF_FC4_FRAME_PARM_3_RETRY_SUPPORT; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: PRLI RSP: RPort(0x%x) parameter-3(0x%x) option(0x%x) cmd(0x%x) rec support:%u", + rport->nport_id, prli_acc_pld->parms[3], options, + cmnd, rec_support); + + /* PRLI ACC: R_Port READY & Report R_Port Link Up */ + if ((cmnd & UNF_ELS_CMND_HIGH_MASK) == UNF_ELS_CMND_ACC) { + /* Update R_Port options(INI/TGT/BOTH) */ + rport->options = options; + + unf_update_port_feature(rport->port_name, rport->options); + + /* NOTE: R_Port only with INI mode, send LOGO */ + if (rport->options == UNF_PORT_MODE_INI) { + /* Update R_Port state: LOGO 
*/ + spin_lock_irqsave(&rport->rport_state_lock, flag); + unf_rport_state_ma(rport, UNF_EVENT_RPORT_LOGO); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* NOTE: Start to Send LOGO */ + unf_rport_enter_logo(lport, rport); + return; + } + + /* About confirm */ + if (fcp_conf && + (lport->low_level_func.lport_cfg_items.fcp_conf != + UNF_FALSE)) { + rport->fcp_conf_needed = UNF_TRUE; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port(0x%x_0x%x) FCP config is need for RPort(0x%x)", + lport->port_id, lport->nport_id, + rport->nport_id); + } + tape_support = (rec_support && task_retry_support && retry_support); + if (tape_support && + (lport->low_level_func.lport_cfg_items.tape_support != UNF_FALSE)) { + rport->tape_support_needed = UNF_TRUE; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[info]Port(0x%x_0x%x) Rec is enabled for RPort(0x%x)", + lport->port_id, lport->nport_id, rport->nport_id); + } + /* Update R_Port state: READY */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + unf_rport_state_ma(rport, UNF_EVENT_RPORT_READY); + rport_state = rport->rp_state; + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Report R_Port online (Link Up) event to SCSI */ + if (rport_state == UNF_RPORT_ST_READY) { + rport->logo_retries = 0; + unf_update_lport_state_by_linkup_event( + lport, rport, rport->options); + } + } else { + /* PRLI RJT: Do R_Port error recovery */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]LOGIN: Port(0x%x)<---LS_RJT(DID:0x%x SID:0x%x) for PRLI. RPort(0x%p) OX_ID(0x%x)", + lport->port_id, lport->nport_id, + rport->nport_id, rport, xchg->ox_id); + + unf_rport_error_recovery(rport); + } +} + +static void unf_prli_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Do R_Port recovery */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3676, UNF_TRUE, v_xchg, return); + + UNF_REFERNCE_VAR(lport); + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + lport = v_xchg->lport; + rport = v_xchg->rport; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + UNF_CHECK_VALID(0x3677, UNF_TRUE, lport, return); + UNF_CHECK_VALID(0x3678, UNF_TRUE, rport, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) RPort(0x%x) send PRLI failed and do recovery", + lport->port_id, lport->nport_id, rport->nport_id); + + /* Start to do R_Port error recovery */ + unf_rport_error_recovery(rport); + + UNF_REFERNCE_VAR(lport); +} + +unsigned int unf_send_prli(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + struct unf_pril_payload_s *prli_pal = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned short ox_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3346, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3347, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + /* Get & Set new free exchange */ + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for PRLI", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = ELS_PRLI; // PRLI + + ox_id = xchg->ox_id; + + /* Set callback function */ + /* for rcvd prli acc/rjt processer */ + xchg->pfn_callback = 
unf_prli_callback; + /* for send prli failed processer */ + xchg->pfn_ob_callback = unf_prli_ob_callback; + + unf_fill_package(&pkg, xchg, v_rport); + + /* Fill PRLI payload */ + prli_pal = &fc_entry->prli.payload; + memset(prli_pal, 0, sizeof(struct unf_pril_payload_s)); + unf_fill_prli_pld(prli_pal, v_lport); + + /* Start to Send RPLI ELS CMND */ + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: PRLI send %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x)", + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_prlo_pld(struct unf_pril_payload_s *v_prlo_pld, + struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x3348, UNF_TRUE, v_prlo_pld, return); + UNF_CHECK_VALID(0x3349, UNF_TRUE, v_lport, return); + + v_prlo_pld->cmnd = (UNF_ELS_CMND_PRLO); + v_prlo_pld->parms[0] = (UNF_FC4_FRAME_PARM_0_FCP); + v_prlo_pld->parms[1] = UNF_NOT_MEANINGFUL; + v_prlo_pld->parms[2] = UNF_NOT_MEANINGFUL; + v_prlo_pld->parms[3] = UNF_NO_SERVICE_PARAMS; + + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, v_prlo_pld, + sizeof(struct unf_pril_payload_s)); +} + +unsigned int unf_send_prlo(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + struct unf_pril_payload_s *prlo_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + struct unf_frame_pkg_s pkg; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3350, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3351, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + + /* Get free exchange for PRLO */ + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for PRLO", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = ELS_PRLO; /* PRLO */ + + ox_id = xchg->ox_id; + + unf_fill_package(&pkg, xchg, v_rport); + + /* Fill PRLO entry(payload) */ + prlo_pld = &fc_entry->prlo.payload; + memset(prlo_pld, 0, sizeof(struct unf_pril_payload_s)); + unf_fill_prlo_pld(prlo_pld, v_lport); + + /* Start to send PRLO command */ + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: PRLO send %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_rrq_pld(struct unf_rrq_s *v_rrq_pld, + struct unf_xchg_s *v_xchg) +{ + UNF_CHECK_VALID(0x3360, UNF_TRUE, v_rrq_pld, return); + UNF_CHECK_VALID(0x3361, UNF_TRUE, v_xchg, return); + + v_rrq_pld->cmnd = UNF_ELS_CMND_RRQ; + v_rrq_pld->sid = v_xchg->sid; + v_rrq_pld->oxid_rxid = ((unsigned int)v_xchg->ox_id << 16 | + v_xchg->rx_id); +} + +static void unf_rrq_callback(void *v_lport, void *v_rport, void *v_xchg) +{ + /* Release I/O */ + struct unf_lport_s *lport = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_els_acc_s *els_acc = NULL; + unsigned int cmnd = 0; + struct unf_xchg_s *io_xchg = NULL; + + UNF_CHECK_VALID(0x3696, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3697, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3698, UNF_TRUE, v_xchg, return); + UNF_REFERNCE_VAR(v_rport); + + lport = (struct unf_lport_s *)v_lport; + UNF_REFERNCE_VAR(lport); + xchg = (struct unf_xchg_s *)v_xchg; + + if (!xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) exchange(0x%p) SfsEntryPtr is NULL", + lport->port_id, xchg); + return; + } + + io_xchg = (struct unf_xchg_s *)xchg->io_xchg; + if (!io_xchg) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) IO exchange is NULL. RRQ cb sfs xchg(0x%p) tag(0x%x)", + lport->port_id, xchg, xchg->hot_pool_tag); + return; + } + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]Port(0x%x) release IO exch(0x%p) tag(0x%x). RRQ cb sfs xchg(0x%p) tag(0x%x)", + lport->port_id, xchg->io_xchg, io_xchg->hot_pool_tag, + xchg, xchg->hot_pool_tag); + + /* NOTE: release I/O exchange resource */ + unf_xchg_ref_dec(io_xchg, XCHG_ALLOC); + + els_acc = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->els_acc; + if (xchg->byte_orders & UNF_BIT_2) + cmnd = be32_to_cpu(els_acc->cmnd); + else + cmnd = (els_acc->cmnd); +} + +static void unf_rrq_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Release I/O */ + struct unf_xchg_s *xchg = NULL; + struct unf_xchg_s *io_xchg = NULL; + + xchg = (struct unf_xchg_s *)v_xchg; + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Exchange can't be NULL"); + return; + } + + io_xchg = (struct unf_xchg_s *)xchg->io_xchg; + if (!io_xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]IO exchange can't be NULL with Sfs exch(0x%p) tag(0x%x)", + xchg, xchg->hot_pool_tag); + return; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[info]send RRQ failed: SFS exch(0x%p) tag(0x%x) exch(0x%p) tag(0x%x) OXID_RXID(0x%x_0x%x) SID_DID(0x%x_0x%x)", + xchg, xchg->hot_pool_tag, io_xchg, io_xchg->hot_pool_tag, + io_xchg->ox_id, io_xchg->rx_id, io_xchg->sid, + io_xchg->did); + + /* NOTE: Free I/O exchange resource */ + unf_xchg_ref_dec(io_xchg, XCHG_ALLOC); +} + +unsigned int unf_send_rrq(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + /* after ABTS Done */ + struct unf_rrq_s *rrq_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3362, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3363, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3364, UNF_TRUE, v_xchg, 
return UNF_RETURN_ERROR); + + /* Get & Set New free Exchange for RRQ */ + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for RRQ", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = ELS_RRQ; // RRQ + + /* Set callback function */ + xchg->pfn_callback = unf_rrq_callback; // release I/O exchange context + /* release I/O exchange context */ + xchg->pfn_ob_callback = unf_rrq_ob_callback; + xchg->io_xchg = v_xchg; // pointer to IO XCHG + + ox_id = xchg->ox_id; + + unf_fill_package(&pkg, xchg, v_rport); + + /* Fill RRQ entry(payload) */ + rrq_pld = &fc_entry->rrq; + memset(rrq_pld, 0, sizeof(struct unf_rrq_s)); + unf_fill_rrq_pld(rrq_pld, v_xchg); + + /* Start to send RRQ command to remote port */ + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]RRQ send %s. Port(0x%x)--->rport(0x%x) free old exchange(0x%x) with OX_ID(0x%x)", + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_rport->nport_id, + v_xchg->hot_pool_tag, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_gff_id_pld(struct unf_gffid_s *v_gff_id, + unsigned int v_nport_id) +{ + UNF_CHECK_VALID(0x3365, UNF_TRUE, v_gff_id, return); + + v_gff_id->ctiu_pream.rev_inid = (UNF_REV_NPORTID_INIT); + v_gff_id->ctiu_pream.gstype_gssub_options = (UNF_FSTYPE_OPT_INIT); + v_gff_id->ctiu_pream.cmnd_rsp_size = (UNF_FSTYPE_GFF_ID); + v_gff_id->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; + v_gff_id->nport_id = v_nport_id; +} + +static void unf_gff_id_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Send PLOGI */ + struct unf_lport_s *lport = NULL; + struct unf_lport_s *root_lport = NULL; + struct unf_rport_s *rport = NULL; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int nport_id = 0; + + UNF_CHECK_VALID(0x3611, UNF_TRUE, v_xchg, return); + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + lport = v_xchg->lport; + nport_id = v_xchg->disc_port_id; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + UNF_CHECK_VALID(0x3612, UNF_TRUE, NULL != lport, return); + + root_lport = (struct unf_lport_s *)lport->root_lport; + atomic_inc(&root_lport->disc.disc_thread_info.disc_contrl_size); + wake_up_process(root_lport->disc.disc_thread_info.data_thread); + + /* Get (safe) R_Port */ + rport = unf_get_rport_by_nport_id(lport, nport_id); + rport = unf_get_safe_rport(lport, rport, UNF_RPORT_REUSE_ONLY, + nport_id); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) can't allocate new RPort(0x%x)", + lport->port_id, nport_id); + return; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) send GFF_ID(0x%x_0x%x) to RPort(0x%x_0x%x) abnormal", + lport->port_id, lport->nport_id, v_xchg->ox_id, + v_xchg->rx_id, rport->rport_index, rport->nport_id); + + /* Update R_Port state: PLOGI_WAIT */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = nport_id; + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* NOTE: Start to send PLOGI */ + ret = unf_send_plogi(lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) 
send PLOGI failed, enter recovry", + lport->port_id); + + /* Do R_Port recovery */ + unf_rport_error_recovery(rport); + } +} + +static void unf_check_rport_need_delay_plogi(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int v_port_feature) +{ + /* + * Called by: + * 1. Private loop + * 2. RCVD GFF_ID ACC + */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = v_rport; + unsigned long flag = 0; + unsigned int nport_id = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3613, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3614, UNF_TRUE, v_rport, return); + nport_id = rport->nport_id; + + /* + * Send GFF_ID means L_Port has INI attribute + ** + * When to send PLOGI: + * 1. R_Port has TGT mode (COM or TGT), send PLOGI immediately + * 2. R_Port only with INI, send LOGO immediately + * 3. R_Port with unknown attribute, delay to send PLOGI + */ + if ((v_port_feature & UNF_PORT_MODE_TGT) || + (lport->enhanced_features & + UNF_LPORT_ENHANCED_FEATURE_ENHANCED_GFF)) { + /* R_Port has TGT mode: send PLOGI immediately */ + rport = unf_get_safe_rport(v_lport, rport, + UNF_RPORT_REUSE_ONLY, nport_id); + UNF_CHECK_VALID(0x3615, UNF_TRUE, rport, return); + + /* Update R_Port state: PLOGI_WAIT */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = nport_id; + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Start to send PLOGI */ + ret = unf_send_plogi(lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) send PLOGI to RPort(0x%x) failed", + lport->port_id, lport->nport_id, nport_id); + + unf_rport_error_recovery(rport); + } + } else if (v_port_feature == UNF_PORT_MODE_INI) { + /* R_Port only with INI mode: can't send PLOGI --->>> + * LOGO/nothing + */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + if (rport->rp_state == UNF_RPORT_ST_INIT) { + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) send LOGO to RPort(0x%x) which only with INI mode", + lport->port_id, lport->nport_id, nport_id); + + /* Enter Closing state */ + unf_rport_enter_logo(lport, rport); + } else { + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + } + } else { + /* Unknown R_Port attribute: Delay to send PLOGI */ + rport = unf_get_safe_rport(v_lport, rport, + UNF_RPORT_REUSE_ONLY, + nport_id); + UNF_CHECK_VALID(0x3616, UNF_TRUE, rport, return); + + /* Update R_Port state: PLOGI_WAIT */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = nport_id; + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + unf_rport_delay_login(rport); + } +} + +static void unf_rcv_gff_id_acc(struct unf_lport_s *v_lport, + struct unf_gffid_rsp_s *v_gff_id_rsp_pld, + unsigned int v_nport_id) +{ + /* Delay to LOGIN */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = NULL; + struct unf_gffid_rsp_s *gff_id_rsp_pld = v_gff_id_rsp_pld; + unsigned int fc4feature = 0; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3617, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3618, UNF_TRUE, v_gff_id_rsp_pld, return); + + fc4feature = gff_id_rsp_pld->fc_4_feature[1]; + if ((UNF_GFF_ACC_MASK & fc4feature) == 0) + fc4feature = be32_to_cpu(gff_id_rsp_pld->fc_4_feature[1]); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, 
UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Port(0x%x_0x%x) RPort(0x%x) received GFF_ID ACC. FC4 feature is 0x%x(1:TGT,2:INI,3:COM)", + lport->port_id, lport->nport_id, v_nport_id, fc4feature); + + /* Check (& Get new) R_Port */ + rport = unf_get_rport_by_nport_id(lport, v_nport_id); + if (rport) + rport = unf_find_rport(lport, v_nport_id, rport->port_name); + + if ((rport) || + (UNF_GET_PORT_OPTIONS(fc4feature) != UNF_PORT_MODE_INI)) { + rport = unf_get_safe_rport(lport, rport, + UNF_RPORT_REUSE_ONLY, + v_nport_id); + UNF_CHECK_VALID(0x3619, UNF_TRUE, NULL != rport, return); + } else { + return; + } + + if ((fc4feature & UNF_GFF_ACC_MASK) != 0) { + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->options = UNF_GET_PORT_OPTIONS(fc4feature); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + } else if (rport->port_name != INVALID_WWPN) { + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->options = unf_get_port_feature(rport->port_name); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + } + + /* NOTE: Send PLOGI if necessary */ + unf_check_rport_need_delay_plogi(lport, rport, rport->options); +} + +static void unf_rcv_gff_id_rjt(struct unf_lport_s *v_lport, + struct unf_gffid_rsp_s *v_gff_id_rsp_pld, + unsigned int v_nport_id) +{ + /* Delay LOGIN or LOGO */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = NULL; + struct unf_gffid_rsp_s *gff_id_rsp_pld = v_gff_id_rsp_pld; + unsigned int rjt_reason = 0; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3620, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3621, UNF_TRUE, v_gff_id_rsp_pld, return); + + /* Check (& Get new) R_Port */ + rport = unf_get_rport_by_nport_id(lport, v_nport_id); + if (rport) + rport = unf_find_rport(lport, v_nport_id, rport->port_name); + + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) get RPort by N_Port_ID(0x%x) failed and alloc new", + lport->port_id, v_nport_id); + + rport = unf_rport_get_free_and_init(lport, UNF_PORT_TYPE_FC, + v_nport_id); + UNF_CHECK_VALID(0x3622, UNF_TRUE, NULL != rport, return); + + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = v_nport_id; + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + } + + rjt_reason = gff_id_rsp_pld->ctiu_pream.frag_reason_exp_vend; + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) send GFF_ID for RPort(0x%x) but was rejected. 
Reason code(0x%x)", + lport->port_id, v_nport_id, rjt_reason); + + if (!UNF_GNN_GFF_ID_RJT_REASON(rjt_reason)) { + rport = unf_get_safe_rport(v_lport, rport, + UNF_RPORT_REUSE_ONLY, + v_nport_id); + UNF_CHECK_VALID(0x3623, UNF_TRUE, NULL != rport, return); + + /* Update R_Port state: PLOGI_WAIT */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = v_nport_id; + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Delay to send PLOGI */ + unf_rport_delay_login(rport); + } else { + spin_lock_irqsave(&rport->rport_state_lock, flag); + if (rport->rp_state == UNF_RPORT_ST_INIT) { + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Enter closing state */ + unf_rport_enter_logo(lport, rport); + } else { + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + } + } +} + +static void unf_gff_id_callback(void *v_lport, void *v_sns_port, void *v_xchg) +{ + struct unf_lport_s *lport = (struct unf_lport_s *)v_lport; + struct unf_lport_s *root_lport = NULL; + struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_xchg; + struct unf_gffid_rsp_s *gff_id_rsp_pld = NULL; + unsigned int cmnd_rsp_size = 0; + unsigned int nport_id = 0; + + UNF_CHECK_VALID(0x3626, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3627, UNF_TRUE, v_sns_port, return); + UNF_CHECK_VALID(0x3628, UNF_TRUE, v_xchg, return); + + UNF_REFERNCE_VAR(v_sns_port); + nport_id = xchg->disc_port_id; + + gff_id_rsp_pld = + &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->gff_id_rsp; + cmnd_rsp_size = (gff_id_rsp_pld->ctiu_pream.cmnd_rsp_size); + + root_lport = (struct unf_lport_s *)lport->root_lport; + atomic_inc(&root_lport->disc.disc_thread_info.disc_contrl_size); + wake_up_process(root_lport->disc.disc_thread_info.data_thread); + + if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_ACCEPT) { + /* Case for GFF_ID ACC: (Delay)PLOGI */ + unf_rcv_gff_id_acc(lport, gff_id_rsp_pld, nport_id); + } else if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_REJECT) { + /* Case for GFF_ID RJT: Delay PLOGI or LOGO directly */ + unf_rcv_gff_id_rjt(lport, gff_id_rsp_pld, nport_id); + } else { + /* Send PLOGI */ + unf_rcv_gff_id_rsp_unknown(lport, nport_id); + } +} + +unsigned int unf_send_gff_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_sns_port, + unsigned int v_nport_id) +{ + struct unf_gffid_s *gff_id = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + struct unf_frame_pkg_s pkg; + struct unf_lport_s *root_lport = NULL; + + UNF_CHECK_VALID(0x3367, UNF_TRUE, v_sns_port, return UNF_RETURN_ERROR); + + if (unf_is_lport_valid(v_lport) != RETURN_OK) + /* Lport is invalid, no retry or handle required, return ok */ + return RETURN_OK; + + root_lport = (struct unf_lport_s *)v_lport->root_lport; + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_sns_port->nport_id, + v_sns_port, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for GFF_ID", + v_lport->port_id); + + return unf_get_and_post_disc_event(v_lport, v_sns_port, + v_nport_id, + UNF_DISC_GET_FEATURE); + } + + xchg->cmnd_code = NS_GFF_ID; /* GFF_ID */ + + xchg->disc_port_id = v_nport_id; + + /* Set callback function */ + xchg->pfn_ob_callback = unf_gff_id_ob_callback; /* send PLOGI */ + xchg->pfn_callback = unf_gff_id_callback; /* send PLOGI or LOGO */ + + ox_id = 
xchg->ox_id; + + unf_fill_package(&pkg, xchg, v_sns_port); + + /* Fill GFF_ID payload(entry) */ + gff_id = &fc_entry->gff_id; /* GFF_ID */ + memset(gff_id, 0, sizeof(struct unf_gffid_s)); + unf_fill_gff_id_pld(gff_id, v_nport_id); + + /* Send GFF_ID GS command now */ + ret = unf_gs_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + else + atomic_dec( + &root_lport->disc.disc_thread_info.disc_contrl_size); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: GFF_ID send %s. Port(0x%x)--->rport(0x%x). Inquire RPort(0x%x) OX_ID(0x%x)", + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_sns_port->nport_id, + v_nport_id, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_gnn_id_pld(struct unf_gnnid_s *v_gnn_id_pld, + unsigned int v_nport_id) +{ + /* Inquiry R_Port node name from SW */ + UNF_CHECK_VALID(0x3368, UNF_TRUE, v_gnn_id_pld, return); + + v_gnn_id_pld->ctiu_pream.rev_inid = UNF_REV_NPORTID_INIT; + v_gnn_id_pld->ctiu_pream.gstype_gssub_options = UNF_FSTYPE_OPT_INIT; + v_gnn_id_pld->ctiu_pream.cmnd_rsp_size = UNF_FSTYPE_GNN_ID; + v_gnn_id_pld->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; + + v_gnn_id_pld->nport_id = v_nport_id; +} + +/* + * Function Name : unf_gnn_id_ob_callback + * Function Description: Callback for sending GNN_ID abnormal + * Input Parameters : struct unf_xchg_s *v_xchg + * Output Parameters : N/A + * Return Type : void + */ +static void unf_gnn_id_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Send GFF_ID */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *sns_port = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int nport_id = 0; + struct unf_lport_s *root_lport = NULL; + + UNF_CHECK_VALID(0x3597, UNF_TRUE, v_xchg, return); + lport = v_xchg->lport; + UNF_CHECK_VALID(0x3598, UNF_TRUE, lport, return); + sns_port = v_xchg->rport; + UNF_CHECK_VALID(0x3599, UNF_TRUE, sns_port, return); + nport_id = v_xchg->disc_port_id; + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) send GNN_ID failed to inquire RPort(0x%x)", + lport->port_id, nport_id); + + root_lport = (struct unf_lport_s *)lport->root_lport; + atomic_inc(&root_lport->disc.disc_thread_info.disc_contrl_size); + wake_up_process(root_lport->disc.disc_thread_info.data_thread); + + /* NOTE: continue next stage */ + ret = unf_get_and_post_disc_event(lport, sns_port, nport_id, + UNF_DISC_GET_FEATURE); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", + lport->port_id, UNF_DISC_GET_FEATURE, nport_id); + + unf_rcv_gff_id_rsp_unknown(lport, nport_id); // send PLOGI + } +} + +static void unf_rcv_gnn_id_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_sns_port, + struct unf_gnnid_rsp_s *v_gnn_id_rsp_pld, + unsigned int v_nport_id) +{ + /* Send GFF_ID or Link down immediately */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *sns_port = v_sns_port; + struct unf_gnnid_rsp_s *gnn_id_rsp_pld = v_gnn_id_rsp_pld; + struct unf_rport_s *rport = NULL; + unsigned long long node_name = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3600, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3601, UNF_TRUE, v_sns_port, return); + UNF_CHECK_VALID(0x3602, UNF_TRUE, v_gnn_id_rsp_pld, return); + + node_name = ((unsigned long long)(gnn_id_rsp_pld->node_name[0]) << + 32) | + ((unsigned long 
long)(gnn_id_rsp_pld->node_name[1])); + + if (node_name == lport->node_name) { + /* R_Port & L_Port with same Node Name */ + rport = unf_get_rport_by_nport_id(lport, v_nport_id); + if (rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_KEVENT, + "[info]Port(0x%x) has the same node name(0x%llx) with RPort(0x%x), linkdown it", + lport->port_id, node_name, v_nport_id); + + /* Destroy immediately */ + unf_rport_immediate_linkdown(lport, rport); + } + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Port(0x%x) got RPort(0x%x) with node name(0x%llx) by GNN_ID", + lport->port_id, v_nport_id, node_name); + + /* Start to Send GFF_ID */ + ret = unf_get_and_post_disc_event(lport, sns_port, v_nport_id, + UNF_DISC_GET_FEATURE); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, + UNF_ERR, + "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", + lport->port_id, UNF_DISC_GET_FEATURE, + v_nport_id); + + unf_rcv_gff_id_rsp_unknown(lport, v_nport_id); + } + } +} + +static void unf_rcv_gnn_id_rjt(struct unf_lport_s *v_lport, + struct unf_rport_s *v_sns_port, + struct unf_gnnid_rsp_s *v_gnn_id_rsp_pld, + unsigned int v_nport_id) +{ + /* Send GFF_ID */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *sns_port = v_sns_port; + struct unf_gnnid_rsp_s *gnn_id_rsp_pld = v_gnn_id_rsp_pld; + unsigned int rjt_reason = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3603, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3604, UNF_TRUE, v_sns_port, return); + UNF_CHECK_VALID(0x3605, UNF_TRUE, v_gnn_id_rsp_pld, return); + + rjt_reason = (gnn_id_rsp_pld->ctiu_pream.frag_reason_exp_vend); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) GNN_ID was rejected with reason code(0x%x)", + lport->port_id, lport->nport_id, rjt_reason); + + if (!UNF_GNN_GFF_ID_RJT_REASON(rjt_reason)) { + /* Node existence: Continue next stage */ + ret = unf_get_and_post_disc_event(lport, sns_port, v_nport_id, + UNF_DISC_GET_FEATURE); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, + UNF_ERR, + "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", + lport->port_id, UNF_DISC_GET_FEATURE, + v_nport_id); + + unf_rcv_gff_id_rsp_unknown(lport, v_nport_id); + } + } +} + +static void unf_gnn_id_callback(void *v_lport, void *v_sns_port, void *v_xchg) +{ + struct unf_lport_s *lport = (struct unf_lport_s *)v_lport; + struct unf_rport_s *sns_port = (struct unf_rport_s *)v_sns_port; + struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_xchg; + struct unf_gnnid_rsp_s *gnn_id_rsp_pld = NULL; + unsigned int cmnd_rsp_size = 0; + unsigned int nport_id = 0; + struct unf_lport_s *root_lport = NULL; + + UNF_CHECK_VALID(0x3608, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3609, UNF_TRUE, v_sns_port, return); + UNF_CHECK_VALID(0x3610, UNF_TRUE, v_xchg, return); + + nport_id = xchg->disc_port_id; + gnn_id_rsp_pld = + &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->gnn_id_rsp; + cmnd_rsp_size = (gnn_id_rsp_pld->ctiu_pream.cmnd_rsp_size); + + root_lport = (struct unf_lport_s *)lport->root_lport; + atomic_inc(&root_lport->disc.disc_thread_info.disc_contrl_size); + wake_up_process(root_lport->disc.disc_thread_info.data_thread); + + if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_ACCEPT) { + /* Case ACC: send GFF_ID or Link down immediately */ + unf_rcv_gnn_id_acc(lport, sns_port, gnn_id_rsp_pld, nport_id); + } else if ((cmnd_rsp_size & 
UNF_CT_IU_RSP_MASK) == UNF_CT_IU_REJECT) { + /* Case RJT: send GFF_ID */ + unf_rcv_gnn_id_rjt(lport, sns_port, gnn_id_rsp_pld, nport_id); + } else { /* NOTE: continue next stage */ + /* Case unknown: send GFF_ID */ + unf_rcv_gnn_id_rsp_unknown(lport, sns_port, nport_id); + } +} + +unsigned int unf_send_gnn_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_sns_port, + unsigned int v_nport_id) +{ + /* from DISC stop/re-login */ + struct unf_gnnid_s *gnn_id_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + struct unf_frame_pkg_s pkg; + struct unf_lport_s *root_lport = NULL; + + UNF_CHECK_VALID(0x3370, UNF_TRUE, v_sns_port, return UNF_RETURN_ERROR); + + if (unf_is_lport_valid(v_lport) != RETURN_OK) + /* Lport is invalid, no retry or handle required, return ok */ + return RETURN_OK; + + root_lport = (struct unf_lport_s *)v_lport->root_lport; + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_sns_port->nport_id, + v_sns_port, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) exchange can't be NULL for GNN_ID", + v_lport->port_id); + + return unf_get_and_post_disc_event(v_lport, v_sns_port, + v_nport_id, + UNF_DISC_GET_NODE_NAME); + } + + xchg->cmnd_code = NS_GNN_ID; /* GNN_ID */ + xchg->disc_port_id = v_nport_id; + + ox_id = xchg->ox_id; + + /* Set callback function */ + xchg->pfn_ob_callback = unf_gnn_id_ob_callback; /* send GFF_ID */ + xchg->pfn_callback = unf_gnn_id_callback; /* send GFF_ID */ + + unf_fill_package(&pkg, xchg, v_sns_port); + + /* Fill GNN_ID entry(payload) */ + gnn_id_pld = &fc_entry->gnn_id; /* GNNID payload */ + memset(gnn_id_pld, 0, sizeof(struct unf_gnnid_s)); + unf_fill_gnn_id_pld(gnn_id_pld, v_nport_id); + + /* Start to send GNN_ID GS command */ + ret = unf_gs_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + else + atomic_dec( + &root_lport->disc.disc_thread_info.disc_contrl_size); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: GNN_ID send %s. Port(0x%x_0x%x)--->rport(0x%x) inquire Nportid(0x%x) OX_ID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", v_lport->port_id, + v_lport->nport_id, v_sns_port->nport_id, + v_nport_id, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_gpn_id_pld(struct unf_gpnid_s *v_gpn_id_pld, + unsigned int v_nport_id) +{ + UNF_CHECK_VALID(0x3371, UNF_TRUE, v_gpn_id_pld, return); + + v_gpn_id_pld->ctiu_pream.rev_inid = UNF_REV_NPORTID_INIT; + v_gpn_id_pld->ctiu_pream.gstype_gssub_options = UNF_FSTYPE_OPT_INIT; + v_gpn_id_pld->ctiu_pream.cmnd_rsp_size = UNF_FSTYPE_GPN_ID; + v_gpn_id_pld->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; + + /* Inquiry WWN from SW */ + v_gpn_id_pld->nport_id = v_nport_id; +} + +unsigned int unf_rport_relogin(struct unf_lport_s *v_lport, + unsigned int v_nport_id) +{ + /* Send GNN_ID */ + struct unf_rport_s *sns_port = NULL; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3563, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + /* Get SNS R_Port */ + sns_port = unf_get_rport_by_nport_id(v_lport, UNF_FC_FID_DIR_SERV); + if (!sns_port) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) can't find fabric Port", + v_lport->nport_id); + + return UNF_RETURN_ERROR; + } + + /* Send GNN_ID now to SW */ + ret = unf_get_and_post_disc_event(v_lport, sns_port, v_nport_id, + UNF_DISC_GET_NODE_NAME); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", + v_lport->nport_id, UNF_DISC_GET_NODE_NAME, + v_nport_id); + + /* NOTE: Continue to next stage */ + unf_rcv_gnn_id_rsp_unknown(v_lport, sns_port, v_nport_id); + } + + return ret; +} + +static void unf_rcv_gpn_id_acc(struct unf_lport_s *v_lport, + unsigned int v_nport_id, + unsigned long long v_port_name) +{ + /* then PLOGI or re-login */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = NULL; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + + rport = unf_find_valid_rport(lport, v_port_name, v_nport_id); + if (rport) { + /* R_Port with TGT mode & L_Port with INI mode: + * send PLOGI with INIT state + */ + if ((rport->options & UNF_PORT_MODE_TGT) == + UNF_PORT_MODE_TGT) { + rport = unf_get_safe_rport(v_lport, rport, + UNF_RPORT_REUSE_INIT, + v_nport_id); + UNF_CHECK_VALID(0x3630, UNF_TRUE, rport, return); + + /* Update R_Port state: PLOGI_WAIT */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = v_nport_id; + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Start to send PLOGI */ + ret = unf_send_plogi(lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) send PLOGI failed for 0x%x, enter recovry", + lport->port_id, lport->nport_id, + v_nport_id); + + unf_rport_error_recovery(rport); + } + } else { + spin_lock_irqsave(&rport->rport_state_lock, flag); + if ((rport->rp_state != UNF_RPORT_ST_PLOGI_WAIT) && + (rport->rp_state != UNF_RPORT_ST_PRLI_WAIT) && + (rport->rp_state != UNF_RPORT_ST_READY)) { + unf_rport_state_ma(rport, + UNF_EVENT_RPORT_LOGO); + spin_unlock_irqrestore( + &rport->rport_state_lock, flag); + + /* Do LOGO operation */ + unf_rport_enter_logo(lport, rport); + } else { + spin_unlock_irqrestore( + &rport->rport_state_lock, flag); + } + } + } else { + /* Send GNN_ID */ + (void)unf_rport_relogin(lport, v_nport_id); + } +} + +static void unf_rcv_gpn_id_rjt(struct unf_lport_s *v_lport, + unsigned int v_nport_id) +{ + struct 
unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = NULL; + + UNF_CHECK_VALID(0x3631, UNF_TRUE, v_lport, return); + + rport = unf_get_rport_by_nport_id(lport, v_nport_id); + if (rport) + unf_rport_linkdown(lport, rport); /* Do R_Port Link down */ +} + +/* + * Function Name : unf_rcv_gpn_id_rsp_unknown + * Function Description: Process unknown type of GPN_ID response + * Input Parameters : struct unf_lport_s *v_lport + * : unsigned int v_nport_id + * Output Parameters : N/A + * Return Type : void + */ +void unf_rcv_gpn_id_rsp_unknown(struct unf_lport_s *v_lport, + unsigned int v_nport_id) +{ + struct unf_lport_s *lport = v_lport; + + UNF_CHECK_VALID(0x3632, UNF_TRUE, v_lport, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) wrong response of GPN_ID with RPort(0x%x)", + lport->port_id, v_nport_id); + + /* NOTE: go to next stage */ + (void)unf_rport_relogin(lport, v_nport_id); +} + +static void unf_gpn_id_callback(void *v_lport, void *v_sns_port, void *v_xchg) +{ + struct unf_lport_s *lport = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_gpnid_rsp_s *gpn_id_rsp_pld = NULL; + unsigned long long port_name = 0; + unsigned int cmnd_rsp_size = 0; + unsigned int nport_id = 0; + struct unf_lport_s *root_lport = NULL; + + UNF_CHECK_VALID(0x3635, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3636, UNF_TRUE, v_sns_port, return); + UNF_CHECK_VALID(0x3637, UNF_TRUE, v_xchg, return); + + UNF_REFERNCE_VAR(v_sns_port); + + lport = (struct unf_lport_s *)v_lport; + xchg = (struct unf_xchg_s *)v_xchg; + nport_id = xchg->disc_port_id; + + root_lport = (struct unf_lport_s *)lport->root_lport; + atomic_inc(&root_lport->disc.disc_thread_info.disc_contrl_size); + wake_up_process(root_lport->disc.disc_thread_info.data_thread); + + gpn_id_rsp_pld = + &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->gpn_id_rsp; + cmnd_rsp_size = gpn_id_rsp_pld->ctiu_pream.cmnd_rsp_size; + if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_ACCEPT) { + /* GPN_ID ACC */ + port_name = ((unsigned long long) + (gpn_id_rsp_pld->port_name[0]) << 32) | + ((unsigned long long) + (gpn_id_rsp_pld->port_name[1])); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]LOGIN: Port(0x%x) GPN_ID ACC with WWN(0x%llx) RPort NPort ID(0x%x)", + lport->port_id, port_name, nport_id); + + /* Send PLOGI or LOGO or GNN_ID */ + unf_rcv_gpn_id_acc(lport, nport_id, port_name); + } else if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == + UNF_CT_IU_REJECT) { + /* GPN_ID RJT: Link Down */ + unf_rcv_gpn_id_rjt(lport, nport_id); + } else { + /* GPN_ID response type unknown: Send GNN_ID */ + unf_rcv_gpn_id_rsp_unknown(lport, nport_id); + } +} + +static void unf_gpn_id_ob_callback(struct unf_xchg_s *v_xchg) +{ + struct unf_lport_s *lport = NULL; + unsigned int nport_id = 0; + struct unf_lport_s *root_lport = NULL; + + UNF_CHECK_VALID(0x3633, UNF_TRUE, v_xchg, return); + + lport = v_xchg->lport; + nport_id = v_xchg->disc_port_id; + UNF_CHECK_VALID(0x3634, UNF_TRUE, lport, return); + + root_lport = (struct unf_lport_s *)lport->root_lport; + atomic_inc(&root_lport->disc.disc_thread_info.disc_contrl_size); + wake_up_process(root_lport->disc.disc_thread_info.data_thread); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) send GPN_ID failed to inquire RPort(0x%x)", + lport->port_id, nport_id); + + /* NOTE: go to next stage */ + (void)unf_rport_relogin(lport, nport_id); +} + +unsigned int unf_send_gpn_id(struct unf_lport_s *v_lport, + struct 
unf_rport_s *v_sns_port, + unsigned int v_nport_id) +{ + struct unf_gpnid_s *gpn_id_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + struct unf_frame_pkg_s pkg; + struct unf_lport_s *root_lport = NULL; + + UNF_CHECK_VALID(0x3374, UNF_TRUE, v_sns_port, return UNF_RETURN_ERROR); + + if (unf_is_lport_valid(v_lport) != RETURN_OK) { + /* Lport is invalid, no retry or handle required, return ok */ + return RETURN_OK; + } + root_lport = (struct unf_lport_s *)v_lport->root_lport; + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_sns_port->nport_id, + v_sns_port, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for GPN_ID", + v_lport->port_id); + + return unf_get_and_post_disc_event(v_lport, v_sns_port, + v_nport_id, + UNF_DISC_GET_PORT_NAME); + } + + xchg->cmnd_code = NS_GPN_ID; // GPN_ID + xchg->disc_port_id = v_nport_id; + + ox_id = xchg->ox_id; + + /* Set callback function */ + xchg->pfn_callback = unf_gpn_id_callback; + /* re-login --->>> GNN_ID */ + xchg->pfn_ob_callback = unf_gpn_id_ob_callback; + + unf_fill_package(&pkg, xchg, v_sns_port); + + /* Fill GPN_ID entry(payload) */ + gpn_id_pld = &fc_entry->gpn_id; /* GPN_ID payload */ + memset(gpn_id_pld, 0, sizeof(struct unf_gpnid_s)); + unf_fill_gpn_id_pld(gpn_id_pld, v_nport_id); + + /* Send GPN_ID GS command */ + ret = unf_gs_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + else + atomic_dec( + &root_lport->disc.disc_thread_info.disc_contrl_size); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: GPN_ID send %s. Port(0x%x)--->rport(0x%x). Inquire RPort(0x%x) with OX_ID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", v_lport->port_id, + v_sns_port->nport_id, v_nport_id, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_gid_ft_pld(struct unf_gid_s *v_gid_pld) +{ + UNF_CHECK_VALID(0x3376, UNF_TRUE, v_gid_pld, return); + + v_gid_pld->ctiu_pream.rev_inid = UNF_REV_NPORTID_INIT; + v_gid_pld->ctiu_pream.gstype_gssub_options = UNF_FSTYPE_OPT_INIT; + v_gid_pld->ctiu_pream.cmnd_rsp_size = UNF_FSTYPE_GID_FT; + v_gid_pld->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; + + v_gid_pld->scope_type = UNF_GID_FT_TYPE; +} + +static void unf_gid_ft_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Do recovery */ + struct unf_lport_s *lport = NULL; + union unf_sfs_u *sfs_ptr = NULL; + struct unf_disc_s *disc = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3589, UNF_TRUE, v_xchg, return); + + sfs_ptr = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!sfs_ptr) + return; + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + lport = v_xchg->lport; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + if (!lport) + return; + + disc = &lport->disc; + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + unf_disc_state_ma(lport, UNF_EVENT_DISC_FAILED); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + /* Do DISC recovery operation */ + unf_disc_error_recovery(lport); +} + +unsigned int unf_send_gid_ft(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + struct unf_gid_s *gid_pld = NULL; + struct unf_gid_rsp_s *gid_rsp = NULL; + struct unf_gif_acc_pld_s *gid_acc_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned short ox_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3377, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3378, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for GID_FT", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = NS_GID_FT; // GID_FT + + ox_id = xchg->ox_id; + + /* Set callback function */ + xchg->pfn_ob_callback = unf_gid_ft_ob_callback; // do DISC recovery + xchg->pfn_callback = unf_gid_ft_callback; + + unf_fill_package(&pkg, xchg, v_rport); + + /* Fill GID_FT entry(payload) */ + gid_pld = &fc_entry->get_id.gid_req; /* GID req payload */ + unf_fill_gid_ft_pld(gid_pld); + gid_rsp = &fc_entry->get_id.gid_rsp; /* GID rsp payload */ + + /* Get GID_FT Response payload */ + gid_acc_pld = (struct unf_gif_acc_pld_s *)unf_get_one_big_sfs_buf(xchg); + if (!gid_acc_pld) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) allocate GID_FT response buffer failed", + v_lport->port_id); + + unf_cm_free_xchg(v_lport, xchg); + return UNF_RETURN_ERROR; + } + memset(gid_acc_pld, 0, sizeof(struct unf_gif_acc_pld_s)); + gid_rsp->gid_acc_pld = gid_acc_pld; + + /* Send GID_FT GS commmand now */ + ret = unf_gs_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: GID_FT send %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_gid_pt_pld(struct unf_gid_s *v_gid_pld, + struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x3379, UNF_TRUE, v_gid_pld, return); + UNF_CHECK_VALID(0x3380, UNF_TRUE, v_lport, return); + + v_gid_pld->ctiu_pream.rev_inid = (UNF_REV_NPORTID_INIT); + v_gid_pld->ctiu_pream.gstype_gssub_options = (UNF_FSTYPE_OPT_INIT); + v_gid_pld->ctiu_pream.cmnd_rsp_size = (UNF_FSTYPE_GID_PT); + v_gid_pld->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; + + /* 0x7F000000 means NX_Port */ + v_gid_pld->scope_type = UNF_GID_PT_TYPE; + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, v_gid_pld, + sizeof(struct unf_gid_s)); +} + +static void unf_gid_pt_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Do recovery */ + struct unf_lport_s *lport = NULL; + union unf_sfs_u *sfs_ptr = NULL; + struct unf_disc_s *disc = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3593, UNF_TRUE, v_xchg, return); + + sfs_ptr = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!sfs_ptr) + return; + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + lport = v_xchg->lport; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + if (!lport) + return; + + disc = &lport->disc; + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + unf_disc_state_ma(lport, UNF_EVENT_DISC_FAILED); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + /* Do DISC recovery operation */ + unf_disc_error_recovery(lport); +} + +unsigned int unf_send_gid_pt(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* from DISC start */ + struct unf_gid_s *gid_pld = NULL; + struct unf_gid_rsp_s *gid_rsp = NULL; + struct unf_gif_acc_pld_s *gid_acc_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned short ox_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3381, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3382, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for GID_PT", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = NS_GID_PT; /* GID_PT */ + ox_id = xchg->ox_id; + + /* Set callback function */ + xchg->pfn_ob_callback = unf_gid_pt_ob_callback; /* do DISC recovery */ + xchg->pfn_callback = unf_gid_pt_callback; + + unf_fill_package(&pkg, xchg, v_rport); + + /* Fill GID_PT entry(payload) */ + gid_pld = &fc_entry->get_id.gid_req; /* GID req payload */ + unf_fill_gid_pt_pld(gid_pld, v_lport); + gid_rsp = &fc_entry->get_id.gid_rsp; /* GID rsp payload */ + + /* Get GID_PT response payload */ + gid_acc_pld = (struct unf_gif_acc_pld_s *)unf_get_one_big_sfs_buf(xchg); + if (!gid_acc_pld) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%0x) Allocate GID_PT response buffer failed", + v_lport->port_id); + + unf_cm_free_xchg(v_lport, xchg); + return UNF_RETURN_ERROR; + } + memset(gid_acc_pld, 0, sizeof(struct unf_gif_acc_pld_s)); + gid_rsp->gid_acc_pld = gid_acc_pld; + + /* Send GID_PT GS command to SW */ + ret = unf_gs_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + 
"[info]LOGIN: GID_PT send %s. Port(0x%x_0x%x)--->rport(0x%x) with OXID(0x%x)", + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_rft_id_pld(struct unf_rftid_s *v_rft_id_pld, + struct unf_lport_s *v_lport) +{ + unsigned int i = 1; + + UNF_CHECK_VALID(0x3383, UNF_TRUE, v_rft_id_pld, return); + UNF_CHECK_VALID(0x3384, UNF_TRUE, v_lport, return); + + v_rft_id_pld->ctiu_pream.rev_inid = UNF_REV_NPORTID_INIT; + v_rft_id_pld->ctiu_pream.gstype_gssub_options = UNF_FSTYPE_OPT_INIT; + v_rft_id_pld->ctiu_pream.cmnd_rsp_size = UNF_FSTYPE_RFT_ID; + v_rft_id_pld->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; + v_rft_id_pld->nport_id = (v_lport->nport_id); + v_rft_id_pld->fc_4_types[0] = (UNF_FC4_SCSI_BIT8); + + for (i = 1; i < 8; i++) + v_rft_id_pld->fc_4_types[i] = 0; +} + +static void unf_rft_id_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Do recovery */ + struct unf_lport_s *lport = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3687, UNF_TRUE, v_xchg, return); + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + lport = v_xchg->lport; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + UNF_CHECK_VALID(0x3688, UNF_TRUE, lport, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) send RFT_ID failed", + lport->port_id, lport->nport_id); + + /* Do L_Port recovery operation */ + unf_lport_error_recovery(lport); +} + +static void unf_rft_id_callback(void *v_lport, void *v_rport, void *v_xchg) +{ + /* RFT_ID --->>> RFF_ID */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_ctiu_prem_s *ctiu_prem = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int cmnd_rsp_size = 0; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3689, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3690, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3691, UNF_TRUE, v_xchg, return); + + lport = (struct unf_lport_s *)v_lport; + rport = (struct unf_rport_s *)v_rport; + xchg = (struct unf_xchg_s *)v_xchg; + + if (!xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) SFS entry is NULL with state(0x%x)", + lport->port_id, lport->en_states); + return; + } + + ctiu_prem = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->rft_id_rsp.ctiu_pream; + cmnd_rsp_size = ctiu_prem->cmnd_rsp_size; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Port(0x%x_0x%x) RFT_ID response is (0x%x)", + (cmnd_rsp_size & UNF_CT_IU_RSP_MASK), + lport->port_id, lport->nport_id); + + if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_ACCEPT) { + /* Case for RFT_ID ACC: send RFF_ID */ + spin_lock_irqsave(&lport->lport_state_lock, flag); + if (lport->en_states != UNF_LPORT_ST_RFT_ID_WAIT) { + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port(0x%x_0x%x) receive RFT_ID ACC in state(0x%x)", + lport->port_id, lport->nport_id, + lport->en_states); + + return; + } + + /* LPort: RFT_ID_WAIT --> RFF_ID_WAIT */ + unf_lport_stat_ma(lport, UNF_EVENT_LPORT_REMOTE_ACC); + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + /* Start to send RFF_ID GS command */ + ret = unf_send_rff_id(lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + 
UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) send RFF_ID failed", + lport->port_id, lport->nport_id); + + /* Do L_Port recovery */ + unf_lport_error_recovery(lport); + } + } else { + /* Case for RFT_ID RJT: do recovery */ + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) receive RFT_ID RJT with reason_code(0x%x) explanation(0x%x)", + lport->port_id, lport->nport_id, + (ctiu_prem->frag_reason_exp_vend) & + UNF_CT_IU_REASON_MASK, + (ctiu_prem->frag_reason_exp_vend) & + UNF_CT_IU_EXPLAN_MASK); + + /* Do L_Port recovery */ + unf_lport_error_recovery(lport); + } +} + +unsigned int unf_send_rft_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* After PLOGI process */ + struct unf_rftid_s *rft_id = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + struct unf_frame_pkg_s pkg; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3385, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3386, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for RFT_ID", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = NS_RFT_ID; /* RFT_ID */ + + ox_id = xchg->ox_id; + + /* Set callback function */ + xchg->pfn_callback = unf_rft_id_callback; + xchg->pfn_ob_callback = unf_rft_id_ob_callback; /* Do L_Port recovery */ + + unf_fill_package(&pkg, xchg, v_rport); + + /* Fill RFT_ID entry(payload) */ + rft_id = &fc_entry->rft_id; + memset(rft_id, 0, sizeof(struct unf_rftid_s)); + unf_fill_rft_id_pld(rft_id, v_lport); + + /* Send RFT_ID GS command */ + ret = unf_gs_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: RFT_ID send %s. Port(0x%x_0x%x)--->rport(0x%x). rport(0x%p) wwpn(0x%llx) OX_ID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", + v_lport->port_id, v_lport->nport_id, v_rport->nport_id, + v_rport, v_rport->port_name, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_rff_id_pld(struct unf_rffid_s *v_rff_id_pld, + struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x3387, UNF_TRUE, v_rff_id_pld, return); + UNF_CHECK_VALID(0x3388, UNF_TRUE, v_lport, return); + + v_rff_id_pld->ctiu_pream.rev_inid = UNF_REV_NPORTID_INIT; + v_rff_id_pld->ctiu_pream.gstype_gssub_options = UNF_FSTYPE_OPT_INIT; + v_rff_id_pld->ctiu_pream.cmnd_rsp_size = UNF_FSTYPE_RFF_ID; + v_rff_id_pld->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; + v_rff_id_pld->nport_id = v_lport->nport_id; + v_rff_id_pld->fc_4_feature = UNF_FC4_FCP_TYPE | + (v_lport->options << 4); +} + +static void unf_rff_id_callback(void *v_lport, void *v_rport, void *v_xchg) +{ + /* RFF_ID --->>> SCR(for INI mode) */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_ctiu_prem_s *ctiu_prem = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int cmnd_rsp_size = 0; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3684, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3685, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3686, UNF_TRUE, v_xchg, return); + + lport = (struct unf_lport_s *)v_lport; + xchg = (struct unf_xchg_s *)v_xchg; + if (unlikely(!xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr)) + return; + + /* SCR send to 0xfffffd(not 0xfffffc), need to get new R_Port */ + UNF_REFERNCE_VAR(v_rport); + rport = unf_get_rport_by_nport_id(lport, UNF_FC_FID_FCTRL); // 0xfffffd + rport = unf_get_safe_rport(lport, rport, UNF_RPORT_REUSE_ONLY, + UNF_FC_FID_FCTRL); + if (unlikely(!rport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) can't allocate RPort(0x%x)", + lport->port_id, UNF_FC_FID_FCTRL); + return; + } + + rport->nport_id = UNF_FC_FID_FCTRL; + ctiu_prem = + &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->rff_id_rsp.ctiu_pream; + cmnd_rsp_size = ctiu_prem->cmnd_rsp_size; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]LOGIN: Port(0x%x_0x%x) RFF_ID rsp is (0x%x)", + lport->port_id, lport->nport_id, + (cmnd_rsp_size & UNF_CT_IU_RSP_MASK)); + + /* RSP Type check: some SW not support RFF_ID, go to next stage also */ + if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_ACCEPT) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]LOGIN: Port(0x%x_0x%x) receive RFF ACC(0x%x) in state(0x%x)", + lport->port_id, lport->nport_id, + (cmnd_rsp_size & UNF_CT_IU_RSP_MASK), + lport->en_states); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) receive RFF RJT(0x%x) in state(0x%x) with RJT reason code(0x%x) explanation(0x%x)", + lport->port_id, lport->nport_id, + (cmnd_rsp_size & UNF_CT_IU_RSP_MASK), + lport->en_states, + (ctiu_prem->frag_reason_exp_vend) & + UNF_CT_IU_REASON_MASK, + (ctiu_prem->frag_reason_exp_vend) & + UNF_CT_IU_EXPLAN_MASK); + } + + /* L_Port state check */ + spin_lock_irqsave(&lport->lport_state_lock, flag); + if (lport->en_states != UNF_LPORT_ST_RFF_ID_WAIT) { + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) receive RFF reply in state(0x%x)", + lport->port_id, lport->nport_id, lport->en_states); + + return; + } + + /* Update L_Port state & Send SCR to remote port */ + /* LPort: 
RFF_ID_WAIT --> SCR_WAIT */ + unf_lport_stat_ma(lport, UNF_EVENT_LPORT_REMOTE_ACC); + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + /* Start to send SCR command */ + ret = unf_send_scr(lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) send SCR failed", + lport->port_id, lport->nport_id); + + /* Do L_Port recovery */ + unf_lport_error_recovery(lport); + } +} + +static void unf_rff_id_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Do recovery */ + struct unf_lport_s *lport = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3682, UNF_TRUE, v_xchg, return); + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + lport = v_xchg->lport; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + UNF_CHECK_VALID(0x3683, UNF_TRUE, NULL != lport, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) send RFF_ID failed", + lport->port_id, lport->nport_id); + + /* Do L_Port recovery */ + unf_lport_error_recovery(lport); +} + +unsigned int unf_send_rff_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* from RFT_ID, then Send SCR */ + struct unf_rffid_s *rff_id = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned short ox_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3389, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3390, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "%s Enter", __func__); + + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for RFF_ID", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = NS_RFF_ID; // RFF_ID + + ox_id = xchg->ox_id; + + /* Set callback function */ + xchg->pfn_callback = unf_rff_id_callback; + xchg->pfn_ob_callback = unf_rff_id_ob_callback; /* Do L_Port recovery */ + + unf_fill_package(&pkg, xchg, v_rport); + + /* Fill RFF_ID entry(payload) */ + rff_id = &fc_entry->rff_id; + memset(rff_id, 0, sizeof(struct unf_rffid_s)); + unf_fill_rff_id_pld(rff_id, v_lport); + + /* Send RFF_ID GS command */ + ret = unf_gs_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: RFF_ID feature 0x%x(10:TGT,20:INI,30:COM) send %s. Port(0x%x_0x%x)--->pstRPortid(0x%x) rport(0x%p) OX_ID(0x%x)", + v_lport->options, (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, v_rport, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_login_with_rport_in_n2n(struct unf_lport_s *v_lport, + unsigned long long v_remote_port_name, + unsigned long long v_remote_nort_name) +{ + /* + * Call by (P2P): + * 1. RCVD FLOGI ACC + * 2. 
Send FLOGI ACC succeed + ** + * Compare WWN, larger is master, then send PLOGI + */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = NULL; + unsigned long lport_flag = 0; + unsigned long rport_flag = 0; + unsigned long long port_name = 0; + unsigned long long node_name = 0; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3539, UNF_TRUE, v_lport, return); + + spin_lock_irqsave(&lport->lport_state_lock, lport_flag); + /* LPort: FLOGI_WAIT --> READY */ + unf_lport_stat_ma(lport, UNF_EVENT_LPORT_READY); + spin_unlock_irqrestore(&lport->lport_state_lock, lport_flag); + + port_name = v_remote_port_name; + node_name = v_remote_nort_name; + + if (lport->port_name > port_name) { + /* Master case: send PLOGI */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x)'s WWN(0x%llx) is larger than rport(0x%llx), should be master", + lport->port_id, lport->port_name, port_name); + + /* Update N_Port_ID now: 0xEF */ + lport->nport_id = UNF_P2P_LOCAL_NPORT_ID; + + rport = unf_find_valid_rport(v_lport, port_name, + UNF_P2P_REMOTE_NPORT_ID); // 0xD6 + rport = unf_get_safe_rport(v_lport, rport, + UNF_RPORT_REUSE_ONLY, + UNF_P2P_REMOTE_NPORT_ID); + if (rport) { + rport->node_name = node_name; + rport->port_name = port_name; + rport->nport_id = UNF_P2P_REMOTE_NPORT_ID; // 0xD6 + rport->local_nport_id = UNF_P2P_LOCAL_NPORT_ID; // 0xEF + + spin_lock_irqsave(&rport->rport_state_lock, + rport_flag); + if ((rport->rp_state == UNF_RPORT_ST_PLOGI_WAIT) || + (rport->rp_state == UNF_RPORT_ST_PRLI_WAIT) || + (rport->rp_state == UNF_RPORT_ST_READY)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]LOGIN: Port(0x%x) Rport(0x%x) have sent PLOGI or PRLI with state(0x%x)", + lport->port_id, rport->nport_id, + rport->rp_state); + + spin_unlock_irqrestore(&rport->rport_state_lock, + rport_flag); + return; + } + /* Update L_Port State: PLOGI_WAIT */ + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); + spin_unlock_irqrestore(&rport->rport_state_lock, + rport_flag); + + /* P2P with master: Start to Send PLOGI */ + ret = unf_send_plogi(lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]LOGIN: Port(0x%x) with WWN(0x%llx) send PLOGI to(0x%llx) failed", + lport->port_id, lport->port_name, + port_name); + + unf_rport_error_recovery(rport); + } + } else { + /* Get/Alloc R_Port failed */ + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) with WWN(0x%llx) allocate RPort(ID:0x%x,WWPN:0x%llx) failed", + lport->port_id, lport->port_name, + UNF_P2P_REMOTE_NPORT_ID, port_name); + } + } else { + /* Slave case: L_Port's Port Name is smaller than R_Port */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port(0x%x) with WWN(0x%llx) is smaller than rport(0x%llx), do nothing", + lport->port_id, lport->port_name, port_name); + } +} + +static void unf_flogi_acc_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Callback for Sending FLOGI ACC succeed */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + unsigned long flags = 0; + unsigned long long port_name = 0; + unsigned long long node_name = 0; + + UNF_CHECK_VALID(0x3457, UNF_TRUE, v_xchg, return); + UNF_CHECK_VALID(0x3458, UNF_TRUE, v_xchg->lport, return); + UNF_CHECK_VALID(0x3459, UNF_TRUE, v_xchg->rport, return); + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flags); + lport = v_xchg->lport; + rport = v_xchg->rport; + 
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags); + + spin_lock_irqsave(&rport->rport_state_lock, flags); + port_name = rport->port_name; + node_name = rport->node_name; + + /* Swap case: Set WWPN & WWNN with zero */ + rport->port_name = 0; + rport->node_name = 0; + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + /* Enter PLOGI stage: after send FLOGI ACC succeed */ + unf_login_with_rport_in_n2n(lport, port_name, node_name); +} + +unsigned int unf_send_flogi_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + struct unf_flogi_payload_s *flogi_acc_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + UNF_CHECK_VALID(0x3393, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3394, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3395, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + v_xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_FLOGI); + + v_xchg->did = 0; /* D_ID must be 0 */ + v_xchg->sid = UNF_FC_FID_FLOGI; /* S_ID must be 0xfffffe */ + v_xchg->oid = v_xchg->sid; + v_xchg->pfn_callback = NULL; + v_xchg->lport = v_lport; + v_xchg->rport = v_rport; + /* call back for sending FLOGI response */ + v_xchg->pfn_ob_callback = unf_flogi_acc_ob_callback; + fc_entry = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, v_xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + unf_fill_package(&pkg, v_xchg, v_rport); + + /* Fill FLOGI ACC payload */ + memset(fc_entry, 0, sizeof(union unf_sfs_u)); + flogi_acc_pld = &fc_entry->flogi_acc.flogi_payload; + flogi_acc_pld->cmnd = (UNF_ELS_CMND_ACC); + unf_fill_flogi_pld(flogi_acc_pld, v_lport); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + + /* Send FLOGI ACC to remote port */ + ret = unf_els_cmnd_send(v_lport, &pkg, v_xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)v_xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]LOGIN: FLOGI ACC send %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id, rx_id); + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static void unf_fill_plogi_acc_pld(struct unf_plogi_payload_s *v_plogi_acc_pld, + struct unf_lport_s *v_lport) +{ + struct unf_lgn_parms_s *login_parms = NULL; + + UNF_CHECK_VALID(0x3396, UNF_TRUE, v_plogi_acc_pld, return); + UNF_CHECK_VALID(0x3397, UNF_TRUE, v_lport, return); + + v_plogi_acc_pld->cmnd = (UNF_ELS_CMND_ACC); + login_parms = &v_plogi_acc_pld->parms; + + if ((v_lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC) || + (v_lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT)) { + login_parms->co_parms.bb_credit = + unf_low_level_bb_credit(v_lport); + login_parms->co_parms.alternate_bb_credit_mgmt = + UNF_BBCREDIT_MANAGE_NFPORT; /* 0 */ + login_parms->co_parms.bb_scn = + (v_lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC) ? 
+ 0 : unf_low_level_bbscn(v_lport); + } else { + login_parms->co_parms.bb_credit = UNF_BBCREDIT_LPORT; + login_parms->co_parms.alternate_bb_credit_mgmt = + UNF_BBCREDIT_MANAGE_LPORT; /* 1 */ + } + + login_parms->co_parms.lowest_version = UNF_PLOGI_VERSION_LOWER; + login_parms->co_parms.highest_version = UNF_PLOGI_VERSION_UPPER; + login_parms->co_parms.continuously_increasing = + UNF_CONTIN_INCREASE_SUPPORT; + login_parms->co_parms.bb_receive_data_field_size = + v_lport->max_frame_size; + login_parms->co_parms.nport_total_concurrent_sequences = + UNF_PLOGI_CONCURRENT_SEQ; + login_parms->co_parms.relative_offset = (UNF_PLOGI_RO_CATEGORY); + login_parms->co_parms.e_d_tov = (v_lport->ed_tov); + login_parms->cl_parms[2].valid = UNF_CLASS_VALID; /* class-3 */ + login_parms->cl_parms[2].received_data_field_size = + v_lport->max_frame_size; + login_parms->cl_parms[2].concurrent_sequences = + UNF_PLOGI_CONCURRENT_SEQ; + login_parms->cl_parms[2].open_sequences_per_exchange = + UNF_PLOGI_SEQ_PER_XCHG; + login_parms->high_node_name = + UNF_GET_NAME_HIGH_WORD(v_lport->node_name); + login_parms->low_node_name = + UNF_GET_NAME_LOW_WORD(v_lport->node_name); + login_parms->high_port_name = + UNF_GET_NAME_HIGH_WORD(v_lport->port_name); + login_parms->low_port_name = + UNF_GET_NAME_LOW_WORD(v_lport->port_name); + + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, + v_plogi_acc_pld, + sizeof(struct unf_plogi_payload_s)); +} + +static void unf_schedule_open_work(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* Used for L_Port port only with TGT, or R_Port only with INI */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = v_rport; + unsigned long delay = 0; + unsigned long flag = 0; + unsigned int ret = 0; + unsigned int port_feature = INVALID_VALUE32; + + UNF_CHECK_VALID(0x3452, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3453, UNF_TRUE, v_rport, return); + + delay = (unsigned long)lport->ed_tov; + port_feature = rport->options & UNF_PORT_MODE_BOTH; + + if ((lport->options == UNF_PORT_MODE_TGT) || + (port_feature == UNF_PORT_MODE_INI)) { + spin_lock_irqsave(&rport->rport_state_lock, flag); + + ret = unf_rport_ref_inc(rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x_0x%x) RPort(0x%x) abnormal, no need open", + lport->port_id, lport->nport_id, + rport->nport_id); + + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + return; + } + + /* Delay work pending check */ + if (delayed_work_pending(&rport->open_work)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x_0x%x) RPort(0x%x) open work is running, no need re-open", + lport->port_id, lport->nport_id, + rport->nport_id); + + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + unf_rport_ref_dec(rport); + return; + } + + /* start open work */ + if (queue_delayed_work( + unf_work_queue, + &rport->open_work, + (unsigned long) + msecs_to_jiffies((unsigned int)delay))) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port(0x%x_0x%x) RPort(0x%x) start open work", + lport->port_id, lport->nport_id, + rport->nport_id); + + (void)unf_rport_ref_inc(rport); + } + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + unf_rport_ref_dec(rport); + } +} + +static void unf_plogi_acc_ob_callback(struct unf_xchg_s *v_xchg) +{ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x3454, UNF_TRUE, v_xchg, return); + + 
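+ /* Descriptive comment (added): read the parent L_Port/R_Port pointers under the exchange state lock */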
spin_lock_irqsave(&v_xchg->xchg_state_lock, flags); + lport = v_xchg->lport; + rport = v_xchg->rport; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags); + + UNF_CHECK_VALID(0x3455, UNF_TRUE, lport, return); + UNF_CHECK_VALID(0x3456, UNF_TRUE, rport, return); + + /* + * 1. According to FC-LS 4.2.7.1: + * after RCVD PLOGI or sending PLOGI ACC, need to terminate open EXCH + */ + unf_cm_xchg_mgr_abort_io_by_id(lport, rport, rport->nport_id, + lport->nport_id, 0); + + /* 2. Send PLOGI ACC fail */ + if (v_xchg->ob_callback_sts != UNF_IO_SUCCESS) { + /* Do R_Port recovery */ + unf_rport_error_recovery(rport); + + /* Do not care: Just used for L_Port only is + * TGT mode or R_Port only is INI mode + */ + unf_schedule_open_work(lport, rport); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x_0x%x) send PLOGI ACC failed(0x%x) with RPort(0x%x) feature(0x%x)", + lport->port_id, lport->nport_id, + lport->options, v_xchg->ob_callback_sts, + rport->nport_id, rport->options); + + /* NOTE: return */ + return; + } + + /* 3. Private Loop: check whether PRLI needs to be sent */ + spin_lock_irqsave(&rport->rport_state_lock, flags); + if ((lport->en_act_topo == UNF_ACT_TOP_PRIVATE_LOOP) && + ((rport->rp_state == UNF_RPORT_ST_PRLI_WAIT) || + (rport->rp_state == UNF_RPORT_ST_READY))) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port(0x%x_0x%x) RPort(0x%x) with State(0x%x) return directly", + lport->port_id, lport->nport_id, + rport->nport_id, rport->rp_state); + + /* Do nothing */ + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + return; + } + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PRLI); // PRLI_WAIT + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + /* 4. Set Port Feature with BOTH: cancel */ + if ((rport->options == UNF_PORT_MODE_UNKNOWN) && + (rport->port_name != INVALID_WWPN)) + rport->options = unf_get_port_feature(rport->port_name); + + /* + * 5. Check whether PRLI needs to be sent with a delay + * Call by: RCVD PLOGI ACC or callback for sending PLOGI ACC succeed + */ + unf_check_rport_need_delay_prli(lport, rport, rport->options); + + /* 6. 
Do not care: Just used for L_Port only is + * TGT mode or R_Port only is INI mode + */ + unf_schedule_open_work(lport, rport); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Port(0x%x_0x%x_0x%x) send PLOGI ACC succeed with RPort(0x%x) feature(0x%x)", + lport->port_id, lport->nport_id, lport->options, + rport->nport_id, rport->options); +} + +unsigned int unf_send_plogi_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + struct unf_plogi_payload_s *plogi_acc_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + UNF_CHECK_VALID(0x3398, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3399, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3400, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + v_xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_PLOGI); + + v_xchg->did = v_rport->nport_id; + v_xchg->sid = v_lport->nport_id; + v_xchg->oid = v_xchg->sid; + v_xchg->pfn_callback = NULL; + v_xchg->lport = v_lport; + v_xchg->rport = v_rport; + /* call back for sending PLOGI ACC */ + v_xchg->pfn_ob_callback = unf_plogi_acc_ob_callback; + + unf_fill_package(&pkg, v_xchg, v_rport); + + fc_entry = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, v_xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + /* Fill PLOGI ACC payload */ + memset(fc_entry, 0, sizeof(union unf_sfs_u)); + plogi_acc_pld = &fc_entry->plogi_acc.payload; + unf_fill_plogi_acc_pld(plogi_acc_pld, v_lport); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + + /* Start to Send PLOGI ACC now */ + ret = unf_els_cmnd_send(v_lport, &pkg, v_xchg); + if (ret != RETURN_OK) + /* NOTE: free exchange */ + unf_cm_free_xchg((void *)v_lport, (void *)v_xchg); + + if ((v_rport->nport_id < UNF_FC_FID_DOM_MGR) || + (v_lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: PLOGI ACC send %s. Port(0x%x_0x%x_0x%llx)--->rport(0x%x_0x%llx) with OX_ID(0x%x) RX_ID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", + v_lport->port_id, v_lport->nport_id, + v_lport->port_name, + v_rport->nport_id, v_rport->port_name, + ox_id, rx_id); + } + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static void unf_fill_rjt_pld(struct unf_els_rjt_s *v_els_rjt, + unsigned int v_reason_code, + unsigned int v_reason_explanation) +{ + UNF_CHECK_VALID(0x3401, UNF_TRUE, v_els_rjt, return); + + v_els_rjt->cmnd = UNF_ELS_CMND_RJT; + v_els_rjt->reason_code = (v_reason_code | v_reason_explanation); +} + +static void unf_fill_prli_acc_pld(struct unf_pril_payload_s *v_prli_acc_pld, + struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + unsigned int port_mode = UNF_FC4_FRAME_PARM_3_TGT; + + UNF_CHECK_VALID(0x3402, UNF_TRUE, v_prli_acc_pld, return); + UNF_CHECK_VALID(0x3403, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3404, UNF_TRUE, v_rport, return); + + v_prli_acc_pld->cmnd = ( + UNF_ELS_CMND_ACC | + ((unsigned int)UNF_FC4_FRAME_PAGE_SIZE << + UNF_FC4_FRAME_PAGE_SIZE_SHIFT) | + ((unsigned int)(sizeof(struct unf_pril_payload_s) - + UNF_PRLI_SIRT_EXTRA_SIZE))); + + v_prli_acc_pld->parms[0] = (UNF_FC4_FRAME_PARM_0_FCP | + UNF_FC4_FRAME_PARM_0_I_PAIR | + UNF_FC4_FRAME_PARM_0_GOOD_RSP_CODE); + v_prli_acc_pld->parms[1] = UNF_NOT_MEANINGFUL; + v_prli_acc_pld->parms[2] = UNF_NOT_MEANINGFUL; + + /* About INI/TGT mode */ + if (v_rport->nport_id < UNF_FC_FID_DOM_MGR) + /* return INI (0x20): R_Port has TGT mode, + * L_Port has INI mode + */ + port_mode = UNF_FC4_FRAME_PARM_3_INI; + else + port_mode = v_lport->options; + + /* About Read xfer_rdy disable */ + v_prli_acc_pld->parms[3] = (UNF_FC4_FRAME_PARM_3_R_XFER_DIS | + port_mode); /* 0x2 */ + + /* About Tape support */ + if (v_rport->tape_support_needed) { + v_prli_acc_pld->parms[3] |= + (UNF_FC4_FRAME_PARM_3_REC_SUPPORT | + UNF_FC4_FRAME_PARM_3_RETRY_SUPPORT | + UNF_FC4_FRAME_PARM_3_TASK_RETRY_ID_SUPPORT | + UNF_FC4_FRAME_PARM_3_CONF_ALLOW); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "PRLI ACC tape support"); + } + + /* About confirm */ + if (v_lport->low_level_func.lport_cfg_items.fcp_conf == UNF_TRUE) + /* 0x80 */ + v_prli_acc_pld->parms[3] |= UNF_FC4_FRAME_PARM_3_CONF_ALLOW; + + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, + v_prli_acc_pld, sizeof(struct unf_pril_payload_s)); +} + +static void unf_prli_acc_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Report R_Port scsi Link Up */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + unsigned long flags = 0; + enum unf_rport_login_state_e rport_state = UNF_RPORT_ST_INIT; + + UNF_CHECK_VALID(0x3449, UNF_TRUE, v_xchg, return); + lport = v_xchg->lport; + rport = v_xchg->rport; + UNF_CHECK_VALID(0x3450, UNF_TRUE, lport, return); + UNF_CHECK_VALID(0x3451, UNF_TRUE, rport, return); + + /* Update & Report Link Up */ + spin_lock_irqsave(&rport->rport_state_lock, flags); + unf_rport_state_ma(rport, UNF_EVENT_RPORT_READY); // READY + rport_state = rport->rp_state; + if (rport->nport_id < UNF_FC_FID_DOM_MGR) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_KEVENT, + "[event]LOGIN: Port(0x%x) RPort(0x%x) state(0x%x) WWN(0x%llx) prliacc", + lport->port_id, rport->nport_id, + rport->rp_state, rport->port_name); + } + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + if (rport_state == UNF_RPORT_ST_READY) { + rport->logo_retries = 0; + unf_update_lport_state_by_linkup_event(lport, rport, + rport->options); + } +} + +unsigned int unf_send_prli_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + 
struct unf_xchg_s *v_xchg) +{ + struct unf_pril_payload_s *prli_acc_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + UNF_CHECK_VALID(0x3405, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3406, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3407, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + v_xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_PRLI); + v_xchg->did = v_rport->nport_id; + v_xchg->sid = v_lport->nport_id; + v_xchg->oid = v_xchg->sid; + v_xchg->lport = v_lport; + v_xchg->rport = v_rport; + + v_xchg->pfn_callback = NULL; + /* callback when sending succeeds */ + v_xchg->pfn_ob_callback = unf_prli_acc_ob_callback; + + /* Fill common package */ + unf_fill_package(&pkg, v_xchg, v_rport); + + /* Get FC entry (allocated when the exchange was created) */ + fc_entry = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, v_xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + /* Fill PRLI payload */ + memset(fc_entry, 0, sizeof(union unf_sfs_u)); + prli_acc_pld = &fc_entry->prli_acc.payload; + unf_fill_prli_acc_pld(prli_acc_pld, v_lport, v_rport); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + + /* Send ELS (PRLI) RSP */ + ret = unf_els_cmnd_send(v_lport, &pkg, v_xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)v_xchg); + + if ((v_rport->nport_id < UNF_FC_FID_DOM_MGR) || + (v_lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: PRLI ACC send %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id, rx_id); + } + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static unsigned int unf_send_rec_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + /* Reserved */ + UNF_REFERNCE_VAR(v_lport); + UNF_REFERNCE_VAR(v_rport); + UNF_REFERNCE_VAR(v_xchg); + + unf_cm_free_xchg((void *)v_lport, (void *)v_xchg); + + return RETURN_OK; +} + +static void unf_rrq_acc_ob_callback(struct unf_xchg_s *v_xchg) +{ + UNF_CHECK_VALID(0x3408, UNF_TRUE, v_xchg, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]RRQ ACC Xchg(0x%p) tag(0x%x)", + v_xchg, v_xchg->hot_pool_tag); + + UNF_REFERNCE_VAR(v_xchg); +} + +static void unf_fill_els_acc_pld(struct unf_els_acc_s *v_els_acc_pld) +{ + UNF_CHECK_VALID(0x3420, UNF_TRUE, v_els_acc_pld, return); + + v_els_acc_pld->cmnd = UNF_ELS_CMND_ACC; +} + +static void unf_rscn_acc_ob_callback(struct unf_xchg_s *v_xchg) +{ + UNF_REFERNCE_VAR(v_xchg); +} + +static unsigned int unf_send_rscn_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + struct unf_els_acc_s *rscn_acc = NULL; + union unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + struct unf_frame_pkg_s pkg; + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + UNF_CHECK_VALID(0x3421, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3422, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3423, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + v_xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_RSCN); + v_xchg->did = v_rport->nport_id; + v_xchg->sid = v_lport->nport_id; + v_xchg->oid = v_xchg->sid; + v_xchg->lport = v_lport; + v_xchg->rport = v_rport; + + /* Set call back function */ + v_xchg->pfn_callback = NULL; + v_xchg->pfn_ob_callback = unf_rscn_acc_ob_callback; // do nothing + + unf_fill_package(&pkg, v_xchg, v_rport); + + fc_entry = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, v_xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + memset(fc_entry, 0, sizeof(union unf_sfs_u)); + rscn_acc = &fc_entry->els_acc; + unf_fill_els_acc_pld(rscn_acc); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + + ret = unf_els_cmnd_send(v_lport, &pkg, v_xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)v_xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: RSCN ACC send %s. Port(0x%x)--->rport(0x%x) with OXID(0x%x) RXID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id, rx_id); + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static void unf_logo_acc_ob_callback(struct unf_xchg_s *v_xchg) +{ + UNF_REFERNCE_VAR(v_xchg); +} + +unsigned int unf_send_logo_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + struct unf_els_acc_s *logo_acc = NULL; + union unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + struct unf_frame_pkg_s pkg; + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + UNF_CHECK_VALID(0x3424, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3425, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3426, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + + v_xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_LOGO); + v_xchg->did = v_rport->nport_id; + v_xchg->sid = v_lport->nport_id; + v_xchg->oid = v_xchg->sid; + v_xchg->lport = v_lport; + v_xchg->rport = v_rport; + v_xchg->pfn_callback = NULL; + v_xchg->pfn_ob_callback = unf_logo_acc_ob_callback; // do nothing + + unf_fill_package(&pkg, v_xchg, v_rport); + + fc_entry = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, v_xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + memset(fc_entry, 0, sizeof(union unf_sfs_u)); + logo_acc = &fc_entry->els_acc; + unf_fill_els_acc_pld(logo_acc); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + + ret = unf_els_cmnd_send(v_lport, &pkg, v_xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)v_xchg); + + if (v_rport->nport_id < UNF_FC_FID_DOM_MGR) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: LOGO ACC send %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id, rx_id); + } + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static unsigned int unf_send_rrq_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + struct unf_els_acc_s *rrq_acc = NULL; + union unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + struct unf_frame_pkg_s pkg = { 0 }; + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + UNF_CHECK_VALID(0x3427, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3428, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3429, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + v_xchg->did = v_rport->nport_id; + v_xchg->sid = v_lport->nport_id; + v_xchg->oid = v_xchg->sid; + v_xchg->lport = v_lport; + v_xchg->rport = v_rport; + v_xchg->pfn_callback = NULL; // do noting + + fc_entry = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, v_xchg->hot_pool_tag); + + return UNF_RETURN_ERROR; + } + + memset(fc_entry, 0, sizeof(union unf_sfs_u)); + rrq_acc = &fc_entry->els_acc; + v_xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_RRQ); + v_xchg->pfn_ob_callback = unf_rrq_acc_ob_callback; // do noting + unf_fill_els_acc_pld(rrq_acc); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + + unf_fill_package(&pkg, v_xchg, v_rport); + + ret = unf_els_cmnd_send(v_lport, &pkg, v_xchg); + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]RRQ ACC send %s. Port(0x%x)--->rport(0x%x) with Xchg(0x%p) OX_ID(0x%x) RX_ID(0x%x)", + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_rport->nport_id, v_xchg, ox_id, rx_id); + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static void unf_fill_pdisc_acc_pld(struct unf_plogi_payload_s *v_pdisc_acc_pld, + struct unf_lport_s *v_lport) +{ + struct unf_lgn_parms_s *login_parms = NULL; + + UNF_CHECK_VALID(0x3430, UNF_TRUE, v_pdisc_acc_pld, return); + UNF_CHECK_VALID(0x3431, UNF_TRUE, v_lport, return); + + v_pdisc_acc_pld->cmnd = UNF_ELS_CMND_ACC; + login_parms = &v_pdisc_acc_pld->parms; + + if ((v_lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC) || + (v_lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT)) { + login_parms->co_parms.bb_credit = + unf_low_level_bb_credit(v_lport); + login_parms->co_parms.alternate_bb_credit_mgmt = + UNF_BBCREDIT_MANAGE_NFPORT; + login_parms->co_parms.bb_scn = + (v_lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC) ? 
+ 0 : unf_low_level_bbscn(v_lport); + } else { + login_parms->co_parms.bb_credit = UNF_BBCREDIT_LPORT; + login_parms->co_parms.alternate_bb_credit_mgmt = + UNF_BBCREDIT_MANAGE_LPORT; + } + + login_parms->co_parms.lowest_version = UNF_PLOGI_VERSION_LOWER; + login_parms->co_parms.highest_version = UNF_PLOGI_VERSION_UPPER; + login_parms->co_parms.continuously_increasing = + UNF_CONTIN_INCREASE_SUPPORT; + login_parms->co_parms.bb_receive_data_field_size = + v_lport->max_frame_size; + login_parms->co_parms.nport_total_concurrent_sequences = + UNF_PLOGI_CONCURRENT_SEQ; + login_parms->co_parms.relative_offset = UNF_PLOGI_RO_CATEGORY; + login_parms->co_parms.e_d_tov = v_lport->ed_tov; + + login_parms->cl_parms[2].valid = UNF_CLASS_VALID; // class-3 + login_parms->cl_parms[2].received_data_field_size = + v_lport->max_frame_size; + login_parms->cl_parms[2].concurrent_sequences = + UNF_PLOGI_CONCURRENT_SEQ; + login_parms->cl_parms[2].open_sequences_per_exchange = + UNF_PLOGI_SEQ_PER_XCHG; + + login_parms->high_node_name = + UNF_GET_NAME_HIGH_WORD(v_lport->node_name); + login_parms->low_node_name = + UNF_GET_NAME_LOW_WORD(v_lport->node_name); + login_parms->high_port_name = + UNF_GET_NAME_HIGH_WORD(v_lport->port_name); + login_parms->low_port_name = + UNF_GET_NAME_LOW_WORD(v_lport->port_name); + + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, + v_pdisc_acc_pld, + sizeof(struct unf_plogi_payload_s)); +} + +static void unf_pdisc_acc_ob_callback(struct unf_xchg_s *v_xchg) +{ + UNF_REFERNCE_VAR(v_xchg); +} + +unsigned int unf_send_pdisc_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + struct unf_plogi_payload_s *pdisc_acc_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + struct unf_frame_pkg_s pkg; + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + UNF_CHECK_VALID(0x3432, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3433, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3434, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + + v_xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_PDISC); + v_xchg->did = v_rport->nport_id; + v_xchg->sid = v_lport->nport_id; + v_xchg->oid = v_xchg->sid; + v_xchg->lport = v_lport; + v_xchg->rport = v_rport; + + /* Set call back function */ + v_xchg->pfn_callback = NULL; + v_xchg->pfn_ob_callback = unf_pdisc_acc_ob_callback; // do nothing + + unf_fill_package(&pkg, v_xchg, v_rport); + + fc_entry = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, v_xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + memset(fc_entry, 0, sizeof(union unf_sfs_u)); + pdisc_acc_pld = &fc_entry->pdisc_acc.payload; + unf_fill_pdisc_acc_pld(pdisc_acc_pld, v_lport); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + + ret = unf_els_cmnd_send(v_lport, &pkg, v_xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)v_xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Send PDISC ACC %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id, rx_id); + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static void unf_fill_adisc_acc_pld(struct unf_adisc_payload_s *v_adisc_acc_pld, + struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x3435, UNF_TRUE, v_adisc_acc_pld, return); + UNF_CHECK_VALID(0x3436, UNF_TRUE, v_lport, return); + + v_adisc_acc_pld->cmnd = (UNF_ELS_CMND_ACC); + + v_adisc_acc_pld->hard_address = (v_lport->nport_id & UNF_ALPA_MASK); + v_adisc_acc_pld->high_node_name = + UNF_GET_NAME_HIGH_WORD(v_lport->node_name); + v_adisc_acc_pld->low_node_name = + UNF_GET_NAME_LOW_WORD(v_lport->node_name); + v_adisc_acc_pld->high_port_name = + UNF_GET_NAME_HIGH_WORD(v_lport->port_name); + v_adisc_acc_pld->low_port_name = + UNF_GET_NAME_LOW_WORD(v_lport->port_name); + v_adisc_acc_pld->nport_id = v_lport->nport_id; + + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, + v_adisc_acc_pld, + sizeof(struct unf_adisc_payload_s)); +} + +static void unf_adisc_acc_ob_callback(struct unf_xchg_s *v_xchg) +{ + UNF_REFERNCE_VAR(v_xchg); +} + +static unsigned int unf_send_adisc_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + struct unf_adisc_payload_s *adisc_acc_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + UNF_CHECK_VALID(0x3437, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3438, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3439, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + v_xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_ADISC); + v_xchg->did = v_rport->nport_id; + v_xchg->sid = v_lport->nport_id; + v_xchg->oid = v_xchg->sid; + v_xchg->lport = v_lport; + v_xchg->rport = v_rport; + + v_xchg->pfn_callback = NULL; + v_xchg->pfn_ob_callback = unf_adisc_acc_ob_callback; // do nothing + + unf_fill_package(&pkg, v_xchg, v_rport); + fc_entry = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, v_xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + memset(fc_entry, 0, sizeof(union unf_sfs_u)); + adisc_acc_pld = &fc_entry->adisc_acc.adisc_payl; + unf_fill_adisc_acc_pld(adisc_acc_pld, v_lport); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + + ret = unf_els_cmnd_send(v_lport, &pkg, v_xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)v_xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Send ADISC ACC %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id, rx_id); + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static void unf_fill_prlo_acc_pld(struct unf_prli_prlo_s *v_prlo_acc, + struct unf_lport_s *v_lport) +{ + struct unf_pril_payload_s *prlo_acc_pld = NULL; + + UNF_CHECK_VALID(0x3440, UNF_TRUE, v_prlo_acc, return); + + prlo_acc_pld = &v_prlo_acc->payload; + prlo_acc_pld->cmnd = (UNF_ELS_CMND_ACC | + ((unsigned int)UNF_FC4_FRAME_PAGE_SIZE << + UNF_FC4_FRAME_PAGE_SIZE_SHIFT) | + ((unsigned int) + sizeof(struct unf_pril_payload_s))); + prlo_acc_pld->parms[0] = UNF_FC4_FRAME_PARM_0_FCP | + UNF_FC4_FRAME_PARM_0_GOOD_RSP_CODE; + prlo_acc_pld->parms[1] = 0; + prlo_acc_pld->parms[2] = 0; + prlo_acc_pld->parms[3] = 0; + + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, prlo_acc_pld, + sizeof(struct unf_pril_payload_s)); +} + +static unsigned int unf_send_prlo_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + struct unf_prli_prlo_s *prlo_acc = NULL; + union unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + struct unf_frame_pkg_s pkg; + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + UNF_CHECK_VALID(0x3441, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3442, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3443, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + + v_xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_PRLO); + v_xchg->did = v_rport->nport_id; + v_xchg->sid = v_lport->nport_id; + v_xchg->oid = v_xchg->sid; + v_xchg->lport = v_lport; + v_xchg->rport = v_rport; + + v_xchg->pfn_callback = NULL; // do nothing + v_xchg->pfn_ob_callback = NULL; // do nothing + + unf_fill_package(&pkg, v_xchg, v_rport); + + fc_entry = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, v_xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + memset(fc_entry, 0, sizeof(union unf_sfs_u)); + prlo_acc = &fc_entry->prlo_acc; + unf_fill_prlo_acc_pld(prlo_acc, v_lport); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + + ret = unf_els_cmnd_send(v_lport, &pkg, v_xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)v_xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Send PRLO ACC %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id, rx_id); + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +unsigned int unf_send_abts(struct unf_lport_s *v_lport, + struct unf_xchg_s *v_xchg) +{ + struct unf_rport_s *rport = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_frame_pkg_s pkg; + + UNF_CHECK_VALID(0x3444, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3445, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + rport = v_xchg->rport; + UNF_CHECK_VALID(0x3446, UNF_TRUE, rport, return UNF_RETURN_ERROR); + + /* set pkg info */ + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + pkg.type = UNF_PKG_BLS_REQ; + pkg.frame_head.csctl_sid = v_xchg->sid; + pkg.frame_head.rctl_did = v_xchg->did; + pkg.frame_head.oxid_rxid = + (unsigned int)v_xchg->ox_id << 16 | v_xchg->rx_id; + pkg.xchg_contex = v_xchg; + pkg.unf_cmnd_pload_bl.buffer_ptr = + (unsigned char *) + v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + + pkg.unf_cmnd_pload_bl.buf_dma_addr = + v_xchg->fcp_sfs_union.sfs_entry.sfs_buff_phy_addr; + pkg.private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = v_xchg->hot_pool_tag; + + UNF_SET_XCHG_ALLOC_TIME(&pkg, v_xchg); + UNF_SET_ABORT_INFO_IOTYPE(&pkg, v_xchg); + + pkg.private[PKG_PRIVATE_XCHG_RPORT_INDEX] = + v_xchg->private[PKG_PRIVATE_XCHG_RPORT_INDEX]; + + /* Send ABTS frame to target */ + ret = unf_bls_cmnd_send(v_lport, &pkg, v_xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]Port(0x%x_0x%x) send ABTS %s. Abort exch(0x%p) Cmdsn:0x%lx, tag(0x%x) iotype(0x%x)", + v_lport->port_id, v_lport->nport_id, + (ret == UNF_RETURN_ERROR) ? "failed" : "succeed", + v_xchg, (unsigned long)v_xchg->cmnd_sn, + v_xchg->hot_pool_tag, v_xchg->data_direction); + + UNF_REFERNCE_VAR(rport); + return ret; +} + +unsigned int unf_release_rport_res(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct unf_rport_info_s rport_info; + + UNF_CHECK_VALID(0x3447, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3448, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + memset(&rport_info, 0, sizeof(struct unf_rport_info_s)); + + rport_info.rport_index = v_rport->rport_index; + rport_info.nport_id = v_rport->nport_id; + rport_info.port_name = v_rport->port_name; + + /* 2. release R_Port(parent context/Session) resource */ + if (!v_lport->low_level_func.service_op.pfn_unf_release_rport_res) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) release rport resource function can't be NULL", + v_lport->port_id); + + return ret; + } + + ret = v_lport->low_level_func.service_op.pfn_unf_release_rport_res( + v_lport->fc_port, + &rport_info); + if (ret != RETURN_OK) + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) rport_index(0x%x, %p) send release session CMND failed", + v_lport->port_id, rport_info.rport_index, v_rport); + + return ret; +} + +static inline unsigned char unf_determin_bbscn(unsigned char local_bbscn, + unsigned char remote_bbscn) +{ + if ((remote_bbscn == 0) || (local_bbscn == 0)) + local_bbscn = 0; + else + local_bbscn = local_bbscn > remote_bbscn ? 
+ local_bbscn : remote_bbscn; + + return local_bbscn; +} + +static void unf_cfg_lowlevel_fabric_params( + struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_fabric_parms_s *v_login_parms) +{ + struct unf_port_login_parms_s login_co_parms = { 0 }; + unsigned int remote_edtov = 0; + unsigned int ret = 0; + unsigned char remote_edtov_resolution = 0; /* 0:ms; 1:ns */ + + if (!v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_set) + return; + + login_co_parms.remote_rttov_tag = + (unsigned char)UNF_GET_RT_TOV_FROM_PARAMS(v_login_parms); + login_co_parms.remote_edtov_tag = 0; + login_co_parms.remote_bbcredit = + (unsigned short) + UNF_GET_BB_CREDIT_FROM_PARAMS(v_login_parms); + login_co_parms.compared_bbscn = + (unsigned int)unf_determin_bbscn( + (unsigned char) + v_lport->low_level_func.lport_cfg_items.bb_scn, + (unsigned char) + UNF_GET_BB_SC_N_FROM_PARAMS(v_login_parms)); + + remote_edtov_resolution = + (unsigned char) + UNF_GET_E_D_TOV_RESOLUTION_FROM_PARAMS(v_login_parms); + remote_edtov = UNF_GET_E_D_TOV_FROM_PARAMS(v_login_parms); + login_co_parms.compared_edtov_val = + remote_edtov_resolution ? + (remote_edtov / 1000000) : remote_edtov; + + login_co_parms.compared_ratov_val = + UNF_GET_RA_TOV_FROM_PARAMS(v_login_parms); + login_co_parms.els_cmnd_code = ELS_FLOGI; + + if (v_lport->en_act_topo & UNF_TOP_P2P_MASK) { + login_co_parms.en_act_topo = + (v_login_parms->co_parms.n_port == UNF_F_PORT) ? + UNF_ACT_TOP_P2P_FABRIC : UNF_ACT_TOP_P2P_DIRECT; + } else { + login_co_parms.en_act_topo = v_lport->en_act_topo; + } + + ret = v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_set( + (void *)v_lport->fc_port, + UNF_PORT_CFG_UPDATE_FABRIC_PARAM, + (void *)&login_co_parms); + if (ret != RETURN_OK) + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Lowlevel unsupport fabric config"); +} + +static unsigned int unf_check_flogi_params( + struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_fabric_parms_s *v_fabric_parms) +{ + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3460, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3461, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3462, UNF_TRUE, v_fabric_parms, + return UNF_RETURN_ERROR); + UNF_REFERNCE_VAR(v_lport); + UNF_REFERNCE_VAR(v_rport); + + if (v_fabric_parms->cl_parms[2].valid == UNF_CLASS_INVALID) { + /* Discard directly */ + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) NPort_ID(0x%x) FLOGI not support class3", + v_lport->port_id, v_rport->nport_id); + + return UNF_RETURN_ERROR; + } + + return ret; +} + +static void unf_save_fabric_params(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_fabric_parms_s *v_fabric_parms) +{ + unsigned long long fabric_node_name = 0; + + UNF_CHECK_VALID(0x3463, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3464, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3465, UNF_TRUE, v_fabric_parms, return); + + UNF_REFERNCE_VAR(v_lport); + fabric_node_name = (unsigned long long) + (((unsigned long long) + (v_fabric_parms->high_node_name) << 32) | + ((unsigned long long) + (v_fabric_parms->low_node_name))); + + /* R_Port for 0xfffffe is used for FLOGI, not need to save WWN */ + if (v_fabric_parms->co_parms.bb_receive_data_field_size > + UNF_MAX_FRAME_SIZE) + v_rport->max_frame_size = UNF_MAX_FRAME_SIZE; // 2112 + else + v_rport->max_frame_size = + v_fabric_parms->co_parms.bb_receive_data_field_size; + + /* with Fabric attribute */ + if 
(v_fabric_parms->co_parms.n_port == UNF_F_PORT) { + v_rport->ed_tov = v_fabric_parms->co_parms.e_d_tov; + v_rport->ra_tov = v_fabric_parms->co_parms.r_a_tov; + v_lport->ed_tov = v_fabric_parms->co_parms.e_d_tov; + v_lport->ra_tov = v_fabric_parms->co_parms.r_a_tov; + v_lport->rr_tov = UNF_CALC_LPORT_RRTOV(v_lport); + v_lport->fabric_node_name = fabric_node_name; + } + + /* Configure info from FLOGI to chip */ + unf_cfg_lowlevel_fabric_params(v_lport, v_rport, v_fabric_parms); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) Rport(0x%x) login parameter: E_D_TOV = %u. LPort E_D_TOV = %u. fabric nodename: 0x%x%x", + v_lport->port_id, + v_rport->nport_id, + (v_fabric_parms->co_parms.e_d_tov), + v_lport->ed_tov, + v_fabric_parms->high_node_name, + v_fabric_parms->low_node_name); +} + +static unsigned int unf_flogi_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg) +{ + struct unf_rport_s *rport = NULL; + struct unf_flogi_fdisc_acc_s *flogi_frame = NULL; + struct unf_fabric_parms_s *fabric_login_parms = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned long flag = 0; + unsigned long long wwpn = 0; + unsigned long long wwnn = 0; + + UNF_CHECK_VALID(0x3466, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3467, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + UNF_REFERNCE_VAR(v_sid); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]LOGIN: Port(0x%x)<---RPort(0x%x) Receive FLOGI with OX_ID(0x%x)", + v_lport->port_id, v_sid, v_xchg->ox_id); + + UNF_SERVICE_COLLECT(v_lport->link_service_info, + UNF_SERVICE_ITEM_FLOGI); + + /* Check L_Port state: Offline */ + if (v_lport->en_states >= UNF_LPORT_ST_OFFLINE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) with state(0x%x) not need to handle FLOGI", + v_lport->port_id, v_lport->en_states); + + unf_cm_free_xchg(v_lport, v_xchg); + return ret; + } + + flogi_frame = + &v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->flogi; + fabric_login_parms = &flogi_frame->flogi_payload.fabric_parms; + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, + &flogi_frame->flogi_payload, + sizeof(struct unf_flogi_payload_s)); + wwpn = (unsigned long long) + (((unsigned long long) + (fabric_login_parms->high_port_name) << 32) | + ((unsigned long long)fabric_login_parms->low_port_name)); + wwnn = (unsigned long long) + (((unsigned long long) + (fabric_login_parms->high_node_name) << 32) | + ((unsigned long long)fabric_login_parms->low_node_name)); + + /* Get (new) R_Port: reuse only */ + rport = unf_get_rport_by_nport_id(v_lport, UNF_FC_FID_FLOGI); + rport = unf_get_safe_rport(v_lport, rport, + UNF_RPORT_REUSE_ONLY, UNF_FC_FID_FLOGI); + if (unlikely(!rport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) has no RPort. 
do nothing", + v_lport->port_id); + + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + /* Update R_Port info */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->port_name = wwpn; + rport->node_name = wwnn; + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Check RCVD FLOGI parameters: only for class-3 */ + ret = unf_check_flogi_params(v_lport, rport, fabric_login_parms); + if (ret != RETURN_OK) { + /* Discard directly */ + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + /* P2P fabric */ + unf_lport_update_topo(v_lport, UNF_ACT_TOP_P2P_DIRECT); + + /* Save fabric parameters */ + unf_save_fabric_params(v_lport, rport, fabric_login_parms); + + /* Send ACC for FLOGI */ + ret = unf_send_flogi_acc(v_lport, rport, v_xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) send FLOGI ACC failed and do recover", + v_lport->port_id); + + /* Do L_Port recovery */ + unf_lport_error_recovery(v_lport); + } + + return ret; +} + +static void unf_cfg_lowlevel_port_params(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_lgn_parms_s *v_login_parms, + unsigned int v_cmd_type) +{ + struct unf_port_login_parms_s login_co_parms = { 0 }; + unsigned int ret = 0; + + if (!v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_set) + return; + + login_co_parms.rport_index = v_rport->rport_index; + login_co_parms.seq_cnt = 0; + login_co_parms.ed_tov = 0; + login_co_parms.ed_tov_timer_val = v_lport->ed_tov; + login_co_parms.tx_mfs = v_rport->max_frame_size; + + login_co_parms.remote_rttov_tag = + (unsigned char)UNF_GET_RT_TOV_FROM_PARAMS(v_login_parms); + login_co_parms.remote_edtov_tag = 0; + login_co_parms.remote_bbcredit = + (unsigned short)UNF_GET_BB_CREDIT_FROM_PARAMS(v_login_parms); + login_co_parms.els_cmnd_code = v_cmd_type; + + if (v_lport->en_act_topo == UNF_ACT_TOP_PRIVATE_LOOP) { + login_co_parms.compared_bbscn = 0; + } else { + login_co_parms.compared_bbscn = + (unsigned int)unf_determin_bbscn( + (unsigned char) + v_lport->low_level_func.lport_cfg_items.bb_scn, + (unsigned char) + UNF_GET_BB_SC_N_FROM_PARAMS(v_login_parms)); + } + + login_co_parms.compared_edtov_val = v_lport->ed_tov; + login_co_parms.compared_ratov_val = v_lport->ra_tov; + + ret = v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_set( + (void *)v_lport->fc_port, + UNF_PORT_CFG_UPDATE_PLOGI_PARAM, + (void *)&login_co_parms); + if (ret != RETURN_OK) + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) Lowlevel unsupport port config", + v_lport->port_id); +} + +unsigned int unf_check_plogi_params(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_lgn_parms_s *v_login_parms) +{ + unsigned int ret = RETURN_OK; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3468, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3469, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3470, UNF_TRUE, v_login_parms, + return UNF_RETURN_ERROR); + + /* Parameters check: Class-type */ + if ((v_login_parms->cl_parms[2].valid == UNF_CLASS_INVALID) || + (v_login_parms->co_parms.bb_receive_data_field_size == 0)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) RPort N_Port_ID(0x%x) with PLOGI parameters invalid: class3(%u), BBReceiveDataFieldSize(0x%x), send LOGO", + v_lport->port_id, v_rport->nport_id, + v_login_parms->cl_parms[2].valid, + 
v_login_parms->co_parms.bb_receive_data_field_size); + + spin_lock_irqsave(&v_rport->rport_state_lock, flag); + /* --->>> LOGO */ + unf_rport_state_ma(v_rport, UNF_EVENT_RPORT_LOGO); + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + + /* Enter LOGO stage */ + unf_rport_enter_logo(v_lport, v_rport); + return UNF_RETURN_ERROR; + } + + /* 16G FC Brocade SW, Domain Controller's + * PLOGI both support CLASS-1 & CLASS-2 + */ + if ((v_login_parms->cl_parms[0].valid == UNF_CLASS_VALID) || + (v_login_parms->cl_parms[1].valid == UNF_CLASS_VALID)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) get PLOGI class1(%u) class2(%u) from N_Port_ID(0x%x)", + v_lport->port_id, + v_login_parms->cl_parms[0].valid, + v_login_parms->cl_parms[1].valid, + v_rport->nport_id); + } + + return ret; +} + +static void unf_save_plogi_params(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_lgn_parms_s *v_login_parms, + unsigned int v_cmd_code) +{ +#define UNF_DELAY_TIME 100 /* WWPN smaller delay to send PRLI with COM mode */ + + unsigned long long wwpn = INVALID_VALUE64; + unsigned long long wwnn = INVALID_VALUE64; + unsigned int ed_tov = 0; + unsigned int remote_edtov = 0; + + if (v_login_parms->co_parms.bb_receive_data_field_size > + UNF_MAX_FRAME_SIZE) + v_rport->max_frame_size = UNF_MAX_FRAME_SIZE; // 2112 + else + v_rport->max_frame_size = + v_login_parms->co_parms.bb_receive_data_field_size; + + wwnn = (unsigned long long) + (((unsigned long long) + (v_login_parms->high_node_name) << 32) | + ((unsigned long long)v_login_parms->low_node_name)); + wwpn = (unsigned long long) + (((unsigned long long) + (v_login_parms->high_port_name) << 32) | + ((unsigned long long)v_login_parms->low_port_name)); + + remote_edtov = v_login_parms->co_parms.e_d_tov; + ed_tov = v_login_parms->co_parms.e_d_tov_resolution ? + (remote_edtov / 1000000) : remote_edtov; + + v_rport->port_name = wwpn; + v_rport->node_name = wwnn; + v_rport->local_nport_id = v_lport->nport_id; + + if ((v_lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT) || + (v_lport->en_act_topo == UNF_ACT_TOP_PRIVATE_LOOP)) { + /* P2P or Private Loop */ + v_lport->ed_tov = (v_lport->ed_tov > ed_tov) ? + v_lport->ed_tov : ed_tov; + v_lport->ra_tov = 2 * v_lport->ed_tov; // 2 * E_D_TOV + v_lport->rr_tov = UNF_CALC_LPORT_RRTOV(v_lport); + + if (ed_tov != 0) + v_rport->ed_tov = ed_tov; + else + v_rport->ed_tov = UNF_DEFAULT_EDTOV; + } else { + /* SAN: E_D_TOV updated by FLOGI */ + v_rport->ed_tov = v_lport->ed_tov; + } + + /* WWPN smaller: delay to send PRLI */ + if (v_rport->port_name > v_lport->port_name) + v_rport->ed_tov += UNF_DELAY_TIME; // 100ms + + /* Configure port parameters to low level (chip) */ + unf_cfg_lowlevel_port_params(v_lport, v_rport, v_login_parms, + v_cmd_code); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) RPort(0x%x) with WWPN(0x%llx) WWNN(0x%llx) login: ED_TOV(%u) Port: ED_TOV(%u)", + v_lport->port_id, + v_rport->nport_id, + v_rport->port_name, v_rport->node_name, + ed_tov, + v_lport->ed_tov); +} + +static int unf_check_bbscn_is_enabled(unsigned char local_bbscn, + unsigned char remote_bbscn) +{ + return unf_determin_bbscn(local_bbscn, remote_bbscn) ? 
+ UNF_TRUE : UNF_FALSE; +} + +static unsigned int unf_irq_process_switch_2_thread(void *v_lport, + struct unf_xchg_s *v_xchg, + unf_evt_task v_evt_task) +{ + struct unf_cm_event_report *event = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = 0; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x1996, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x1996, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + lport = v_lport; + xchg = v_xchg; + + if (unlikely((!lport->event_mgr.pfn_unf_get_free_event) || + (!lport->event_mgr.pfn_unf_post_event) || + (!lport->event_mgr.pfn_unf_release_event))) { + UNF_TRACE(0x2065, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) event function is NULL", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + ret = unf_xchg_ref_inc(xchg, SFS_RESPONSE); + UNF_CHECK_VALID(0x3343, UNF_TRUE, (ret == RETURN_OK), + return UNF_RETURN_ERROR); + + event = lport->event_mgr.pfn_unf_get_free_event((void *)v_lport); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, event, + return UNF_RETURN_ERROR); + + event->lport = lport; + event->event_asy_flag = UNF_EVENT_ASYN; + event->pfn_unf_event_task = v_evt_task; + event->para_in = v_xchg; + lport->event_mgr.pfn_unf_post_event(lport, event); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) start to switch thread process now", + lport->port_id); + + return ret; +} + +static unsigned int unf_plogi_handler_com_process(struct unf_xchg_s *v_xchg) +{ + struct unf_xchg_s *xchg = v_xchg; + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_plogi_pdisc_s *plogi_frame = NULL; + struct unf_lgn_parms_s *login_parms = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned long flag = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, xchg, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, xchg->lport, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, xchg->rport, + return UNF_RETURN_ERROR); + + lport = xchg->lport; + rport = xchg->rport; + plogi_frame = + &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->plogi; + login_parms = &plogi_frame->payload.parms; + + unf_save_plogi_params(lport, rport, login_parms, + ELS_PLOGI); + + /* Update state: PLOGI_WAIT */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = xchg->sid; + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Send PLOGI ACC to remote port */ + ret = unf_send_plogi_acc(lport, rport, xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) send PLOGI ACC failed", + lport->port_id); + + /* NOTE: exchange has been freed inner(before) */ + unf_rport_error_recovery(rport); + return ret; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]LOGIN: Port(0x%x) send PLOGI ACC to Port(0x%x) succeed", + lport->port_id, rport->nport_id); + + return ret; +} + +static int unf_plogi_async_handle(void *v_argc_in, void *v_argc_out) +{ + struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_argc_in; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x2267, UNF_TRUE, xchg, return UNF_RETURN_ERROR); + + ret = unf_plogi_handler_com_process(xchg); + unf_xchg_ref_dec(xchg, SFS_RESPONSE); + + return (int)ret; +} + +static unsigned int unf_send_els_rjt_by_did(struct unf_lport_s *v_lport, + struct unf_xchg_s *v_xchg, + unsigned int v_did, + struct unf_rjt_info_s *v_rjt_info) +{ + struct 
unf_els_rjt_s *els_rjt = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = v_xchg; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + + UNF_CHECK_VALID(0x3503, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3504, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + xchg->cmnd_code = UNF_SET_ELS_RJT_TYPE(v_rjt_info->els_cmnd_code); + xchg->did = v_did; + xchg->sid = v_lport->nport_id; + xchg->oid = xchg->sid; + xchg->lport = v_lport; + xchg->rport = NULL; + xchg->disc_rport = NULL; + + xchg->pfn_callback = NULL; + xchg->pfn_ob_callback = NULL; + + unf_fill_package(&pkg, xchg, NULL); + + fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, xchg); + return UNF_RETURN_ERROR; + } + + els_rjt = &fc_entry->els_rjt; + memset(els_rjt, 0, sizeof(struct unf_els_rjt_s)); + unf_fill_rjt_pld(els_rjt, v_rjt_info->reason_code, + v_rjt_info->reason_explanation); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Send LS_RJT %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_did, ox_id, rx_id); + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static unsigned int unf_plogi_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg) +{ + struct unf_xchg_s *xchg = v_xchg; + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = NULL; + struct unf_plogi_pdisc_s *plogi_frame = NULL; + struct unf_lgn_parms_s *login_parms = NULL; + struct unf_rjt_info_s rjt_info = { 0 }; + unsigned long long wwpn = INVALID_VALUE64; + unsigned int ret = UNF_RETURN_ERROR; + int bbscn_enabled = UNF_FALSE; + int switch_2_thread = UNF_FALSE; + + UNF_CHECK_VALID(0x3474, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3475, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + /* 1. Maybe: PLOGI is sent by Name server */ + if ((v_sid < UNF_FC_FID_DOM_MGR) || + (v_lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT)) + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Receive PLOGI. Port(0x%x_0x%x)<---RPort(0x%x) with OX_ID(0x%x)", + v_lport->port_id, v_lport->nport_id, v_sid, + v_xchg->ox_id); + + UNF_SERVICE_COLLECT(v_lport->link_service_info, + UNF_SERVICE_ITEM_PLOGI); + + /* 2. State check: Offline */ + if (lport->en_states >= UNF_LPORT_ST_OFFLINE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) received PLOGI with state(0x%x)", + lport->port_id, lport->nport_id, lport->en_states); + + unf_cm_free_xchg(lport, xchg); + return UNF_RETURN_ERROR; + } + + /* + * 3. 
According to FC-LS 4.2.7.1: + * After RCVD PLogi or send Plogi ACC, need to termitate open EXCH + */ + unf_cm_xchg_mgr_abort_io_by_id(lport, rport, v_sid, lport->nport_id, 0); + + /* Get R_Port by WWpn */ + plogi_frame = + &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->plogi; + login_parms = &plogi_frame->payload.parms; + + UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, + &plogi_frame->payload, + sizeof(struct unf_plogi_payload_s)); + + wwpn = (unsigned long long) + (((unsigned long long) + (login_parms->high_port_name) << 32) | + ((unsigned long long)login_parms->low_port_name)); + + /* 4. Get (new) R_Port (by wwpn) */ + rport = unf_find_rport(lport, v_sid, wwpn); + rport = unf_get_safe_rport(lport, rport, UNF_RPORT_REUSE_ONLY, v_sid); + if (!rport) { + memset(&rjt_info, 0, sizeof(struct unf_rjt_info_s)); + rjt_info.els_cmnd_code = ELS_PLOGI; + rjt_info.reason_code = UNF_LS_RJT_BUSY; + rjt_info.reason_explanation = + UNF_LS_RJT_INSUFFICIENT_RESOURCES; + + /* R_Port is NULL: Send ELS RJT for PLOGI */ + (void)unf_send_els_rjt_by_did(lport, xchg, v_sid, &rjt_info); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) has no RPort and send PLOGI reject", + lport->port_id); + + /* NOTE: exchange has been freed inner(before) */ + return UNF_RETURN_ERROR; + } + + /* 5. Cancel recovery timer work after RCVD PLOGI */ + if (cancel_delayed_work(&rport->recovery_work)) + atomic_dec(&rport->rport_ref_cnt); + + /* + * 6. Plogi parameters check + * Call by: (RCVD) PLOGI handler & callback function for RCVD PLOGI_ACC + */ + ret = unf_check_plogi_params(lport, rport, login_parms); + if (ret != RETURN_OK) { + unf_cm_free_xchg(lport, xchg); + return UNF_RETURN_ERROR; + } + + xchg->lport = v_lport; + xchg->rport = rport; + xchg->sid = v_sid; + + /* 7. About bbscn for context change */ + bbscn_enabled = unf_check_bbscn_is_enabled( + (unsigned char)lport->low_level_func.lport_cfg_items.bb_scn, + (unsigned char)UNF_GET_BB_SC_N_FROM_PARAMS(login_parms)); + if ((lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT) && + (bbscn_enabled == UNF_TRUE)) { + switch_2_thread = UNF_TRUE; + lport->b_bbscn_support = UNF_TRUE; + } + + /* 8. 
Process PLOGI Frame: switch to thread if necessary */ + if ((switch_2_thread == UNF_TRUE) && (lport->root_lport == lport)) + /* Wait for LR complete sync */ + ret = unf_irq_process_switch_2_thread(lport, xchg, + unf_plogi_async_handle); + else + ret = unf_plogi_handler_com_process(xchg); + + return ret; +} + +static void unf_obtain_tape_capacity(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int tape_parm) +{ + unsigned int rec_support = 0; + unsigned int task_retry_support = 0; + unsigned int retry_support = 0; + + rec_support = tape_parm & UNF_FC4_FRAME_PARM_3_REC_SUPPORT; + task_retry_support = tape_parm & + UNF_FC4_FRAME_PARM_3_TASK_RETRY_ID_SUPPORT; + retry_support = tape_parm & UNF_FC4_FRAME_PARM_3_RETRY_SUPPORT; + + if ((v_lport->low_level_func.lport_cfg_items.tape_support) && + rec_support && task_retry_support && retry_support) { + v_rport->tape_support_needed = UNF_TRUE; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x_0x%x) FC_tape is needed for RPort(0x%x)", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id); + } + + if ((tape_parm & UNF_FC4_FRAME_PARM_3_CONF_ALLOW) && + (v_lport->low_level_func.lport_cfg_items.fcp_conf != UNF_FALSE)) { + v_rport->fcp_conf_needed = UNF_TRUE; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x_0x%x) FCP confirm is needed for RPort(0x%x)", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id); + } +} + +unsigned int unf_prli_handler_com_process(struct unf_xchg_s *v_xchg) +{ + struct unf_prli_prlo_s *prli = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned long flags = 0; + unsigned int uisid = 0; + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_xchg_s *xchg = NULL; + + xchg = v_xchg; + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, xchg, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, xchg->lport, + return UNF_RETURN_ERROR); + lport = xchg->lport; + uisid = v_xchg->sid; + + UNF_SERVICE_COLLECT(lport->link_service_info, UNF_SERVICE_ITEM_PRLI); + + /* 1. Get R_Port: for each R_Port from rport_busy_list */ + rport = unf_get_rport_by_nport_id(lport, uisid); + if (!rport) { + /* non session (R_Port) existence */ + (void)unf_send_logo_by_did(lport, uisid); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) received PRLI but no RPort SID(0x%x) OX_ID(0x%x)", + lport->port_id, lport->nport_id, uisid, + v_xchg->ox_id); + + unf_cm_free_xchg(lport, v_xchg); + return ret; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]LOGIN: Receive PRLI. Port(0x%x)<---RPort(0x%x) with S_ID(0x%x)", + lport->port_id, rport->nport_id, uisid); + + /* 2. Get PRLI info */ + prli = &v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->prli; + if ((uisid < UNF_FC_FID_DOM_MGR) || + (lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]LOGIN: Receive PRLI. Port(0x%x_0x%x)<---RPort(0x%x) parameter-3(0x%x) OX_ID(0x%x)", + lport->port_id, lport->nport_id, uisid, + prli->payload.parms[3], v_xchg->ox_id); + } + + UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, + &prli->payload, sizeof(struct unf_pril_payload_s)); + + spin_lock_irqsave(&rport->rport_state_lock, flags); + + /* 3. 
Increase R_Port ref_cnt */ + ret = unf_rport_ref_inc(rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) RPort(0x%x_0x%p) is removing and do nothing", + lport->port_id, rport->nport_id, rport); + + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + unf_cm_free_xchg(lport, v_xchg); + return RETURN_OK; + } + + /* 4. Cancel R_Port Open work */ + if (cancel_delayed_work(&rport->open_work)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x_0x%x) RPort(0x%x) cancel open work succeed", + lport->port_id, lport->nport_id, rport->nport_id); + + /* This is not the last counter */ + atomic_dec(&rport->rport_ref_cnt); + } + + /* 5. Check R_Port state */ + if ((rport->rp_state != UNF_RPORT_ST_PRLI_WAIT) && + (rport->rp_state != UNF_RPORT_ST_READY)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) RPort(0x%x) with state(0x%x) when received PRLI, send LOGO", + lport->port_id, lport->nport_id, + rport->nport_id, rport->rp_state); + + unf_rport_state_ma(rport, UNF_EVENT_RPORT_LOGO); // LOGO + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + /* NOTE: Start to send LOGO */ + unf_rport_enter_logo(lport, rport); + + unf_cm_free_xchg(lport, v_xchg); + unf_rport_ref_dec(rport); + + return ret; + } + + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + /* 6. Update R_Port options(INI/TGT/BOTH) */ + rport->options = prli->payload.parms[3] & + (UNF_FC4_FRAME_PARM_3_TGT | + UNF_FC4_FRAME_PARM_3_INI); + + unf_update_port_feature(rport->port_name, rport->options); + + /* for Confirm */ + rport->fcp_conf_needed = UNF_FALSE; + + unf_obtain_tape_capacity(lport, rport, prli->payload.parms[3]); + + if ((prli->payload.parms[3] & UNF_FC4_FRAME_PARM_3_CONF_ALLOW) && + (lport->low_level_func.lport_cfg_items.fcp_conf != UNF_FALSE)) { + rport->fcp_conf_needed = UNF_TRUE; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x_0x%x) FCP confirm is needed for RPort(0x%x)", + lport->port_id, lport->nport_id, rport->nport_id); + } + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x_0x%x) RPort(0x%x) parameter-3(0x%x) options(0x%x)", + lport->port_id, lport->nport_id, rport->nport_id, + prli->payload.parms[3], rport->options); + + /* 7. Send PRLI ACC */ + ret = unf_send_prli_acc(lport, rport, v_xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) RPort(0x%x) send PRLI ACC failed", + lport->port_id, lport->nport_id, rport->nport_id); + + /* NOTE: exchange has been freed inner(before) */ + unf_rport_error_recovery(rport); + } + + /* 8. 
Decrease R_Port ref_cnt */ + unf_rport_ref_dec(rport); + + return ret; +} + +static int unf_prli_async_handle(void *v_argc_in, void *v_argc_out) +{ + struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_argc_in; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x2267, UNF_TRUE, xchg, return UNF_RETURN_ERROR); + + ret = unf_prli_handler_com_process(xchg); + + unf_xchg_ref_dec(xchg, SFS_RESPONSE); + + return (int)ret; +} + +static unsigned int unf_prli_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg) +{ + unsigned int ret = UNF_RETURN_ERROR; + int switch_2_thread = UNF_FALSE; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x3476, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3477, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + v_xchg->sid = v_sid; + v_xchg->lport = v_lport; + lport = v_lport; + + if ((v_lport->b_bbscn_support) && + (v_lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT)) + switch_2_thread = UNF_TRUE; + + if ((switch_2_thread == UNF_TRUE) && (lport->root_lport == lport)) + /* Wait for LR done sync */ + ret = unf_irq_process_switch_2_thread(v_lport, v_xchg, + unf_prli_async_handle); + else + ret = unf_prli_handler_com_process(v_xchg); + + return ret; +} + +static void unf_save_rscn_port_id( + struct unf_rscn_mg_s *v_rscn_mg, + struct unf_rscn_port_id_page_s *v_rscn_port_id) +{ + struct unf_port_id_page_s *exit_port_id_page = NULL; + struct unf_port_id_page_s *new_port_id_page = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flag = 0; + enum int_e repeat = UNF_FALSE; + + UNF_CHECK_VALID(0x3478, UNF_TRUE, v_rscn_mg, return); + UNF_CHECK_VALID(0x3479, UNF_TRUE, v_rscn_port_id, return); + + /* 1. check new RSCN Port_ID (RSNC_Page) + * whether within RSCN_Mgr or not + */ + spin_lock_irqsave(&v_rscn_mg->rscn_id_list_lock, flag); + if (list_empty(&v_rscn_mg->list_using_rscn_page)) { + repeat = UNF_FALSE; + } else { + /* Check repeat: for each exist RSCN page + * form RSCN_Mgr Page list + */ + list_for_each_safe(node, next_node, + &v_rscn_mg->list_using_rscn_page) { + exit_port_id_page = + list_entry(node, struct unf_port_id_page_s, + list_node_rscn); + if ((exit_port_id_page->port_id_port == + v_rscn_port_id->port_id_port) && + (exit_port_id_page->port_id_area == + v_rscn_port_id->port_id_area) && + (exit_port_id_page->port_id_domain == + v_rscn_port_id->port_id_domain)) { + repeat = UNF_TRUE; + break; + } + } + } + spin_unlock_irqrestore(&v_rscn_mg->rscn_id_list_lock, flag); + + UNF_CHECK_VALID(0x3480, UNF_TRUE, v_rscn_mg->pfn_unf_get_free_rscn_node, + return); + + /* 2. 
Get & add free RSNC Node --->>> RSCN_Mgr */ + if (repeat == UNF_FALSE) { + new_port_id_page = + v_rscn_mg->pfn_unf_get_free_rscn_node(v_rscn_mg); + if (!new_port_id_page) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Get free RSCN node failed"); + + return; + } + + new_port_id_page->uc_addr_format = v_rscn_port_id->addr_format; + new_port_id_page->uc_event_qualifier = + v_rscn_port_id->event_qualifier; + new_port_id_page->uc_reserved = v_rscn_port_id->reserved; + new_port_id_page->port_id_domain = + v_rscn_port_id->port_id_domain; + new_port_id_page->port_id_area = v_rscn_port_id->port_id_area; + new_port_id_page->port_id_port = v_rscn_port_id->port_id_port; + + /* Add entry to list: using_rscn_page */ + spin_lock_irqsave(&v_rscn_mg->rscn_id_list_lock, flag); + list_add_tail(&new_port_id_page->list_node_rscn, + &v_rscn_mg->list_using_rscn_page); + spin_unlock_irqrestore(&v_rscn_mg->rscn_id_list_lock, flag); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) has repeat RSCN node with domain(0x%x) area(0x%x)", + v_rscn_port_id->port_id_domain, + v_rscn_port_id->port_id_area, + v_rscn_port_id->port_id_port); + } +} + +static unsigned int unf_analysis_rscn_payload(struct unf_lport_s *v_lport, + struct unf_rscn_pld_s *v_rscn_pld) +{ +#define UNF_OS_DISC_REDISC_TIME 10000 + + struct unf_rscn_port_id_page_s *rscn_port_id = NULL; + struct unf_disc_s *disc = NULL; + struct unf_rscn_mg_s *rscn_mgr = NULL; + unsigned int i = 0; + unsigned int pld_len = 0; + unsigned int port_id_page_cnt = 0; + unsigned int ret = RETURN_OK; + unsigned long flag = 0; + enum int_e need_disc_flag = UNF_FALSE; + + UNF_CHECK_VALID(0x3481, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3482, UNF_TRUE, v_rscn_pld, return UNF_RETURN_ERROR); + + /* This field is the length in bytes of the entire Payload, + * inclusive of the word 0 + */ + pld_len = UNF_GET_RSCN_PLD_LEN(v_rscn_pld->cmnd); + pld_len -= sizeof(v_rscn_pld->cmnd); + port_id_page_cnt = pld_len / UNF_RSCN_PAGE_LEN; + + /* Pages within payload is nor more than 255 */ + if (port_id_page_cnt > UNF_RSCN_PAGE_SUM) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x_0x%x) page num(0x%x) exceed 255 in RSCN", + v_lport->port_id, v_lport->nport_id, + port_id_page_cnt); + + return UNF_RETURN_ERROR; + } + + /* L_Port-->Disc-->Rscn_Mgr */ + disc = &v_lport->disc; + rscn_mgr = &disc->rscn_mgr; + + /* for each ID from RSCN_Page: check whether need to Disc or not */ + while (i < port_id_page_cnt) { + rscn_port_id = &v_rscn_pld->port_id_page[i]; + if (unf_lookup_lport_by_nport_id(v_lport, *(unsigned int *)rscn_port_id)) { + /* Prevent to create session with L_Port which have the same N_Port_ID */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_INFO, + "[info]Port(0x%x) find local N_Port_ID(0x%x) within RSCN payload", + ((struct unf_lport_s *) + (v_lport->root_lport))->nport_id, + *(unsigned int *)rscn_port_id); + } else { + /* New RSCN_Page ID find, save it to RSCN_Mgr */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_INFO, + "[info]Port(0x%x_0x%x) save RSCN N_Port_ID(0x%x)", + v_lport->port_id, v_lport->nport_id, + *(unsigned int *)rscn_port_id); + + /* 1. 
new RSCN_Page ID find, save it to RSCN_Mgr */ + unf_save_rscn_port_id(rscn_mgr, rscn_port_id); + need_disc_flag = UNF_TRUE; + unf_report_io_dm_event(v_lport, ELS_RSCN, + *(unsigned int *)rscn_port_id); + } + i++; + } + + if (need_disc_flag != UNF_TRUE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR, + "[info]Port(0x%x) find all N_Port_ID and do not need to disc", + ((struct unf_lport_s *)(v_lport->root_lport))->nport_id); + + return RETURN_OK; + } + + /* 2. Do/Start Disc: Check & do Disc (GID_PT) process */ + if (!disc->unf_disc_temp.pfn_unf_disc_start) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) DISC start function is NULL", + v_lport->nport_id, v_lport->nport_id); + + return UNF_RETURN_ERROR; + } + + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + if ((disc->en_states == UNF_DISC_ST_END) || + ((jiffies - disc->last_disc_jiff) > + msecs_to_jiffies(UNF_OS_DISC_REDISC_TIME))) { + disc->disc_option = UNF_RSCN_DISC; + disc->last_disc_jiff = jiffies; + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + ret = disc->unf_disc_temp.pfn_unf_disc_start(v_lport); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_ABNORMAL, UNF_INFO, + "[info]Port(0x%x_0x%x) DISC state(0x%x) with last time(%llu) and don't do DISC", + v_lport->port_id, v_lport->nport_id, + disc->en_states, disc->last_disc_jiff); + + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + } + + return ret; +} + +static unsigned int unf_rscn_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg) +{ + /* + * A RSCN ELS shall be sent to registered Nx_Ports + * when an event occurs that may have affected the state of + * one or more Nx_Ports, or the ULP state within the Nx_Port. + * + * The Payload of a RSCN Request includes a list + * containing the addresses of the affected Nx_Ports. + * + * Each affected Port_ID page contains the ID of the Nx_Port, + * Fabric Controller, E_Port, domain, or area for + * which the event was detected. + */ + struct unf_rscn_pld_s *rscn_pld = NULL; + struct unf_rport_s *rport = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int pld_len = 0; + + UNF_REFERNCE_VAR(pld_len); + UNF_CHECK_VALID(0x3483, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3484, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Receive RSCN Port(0x%x_0x%x)<---RPort(0x%x) OX_ID(0x%x)", + v_lport->port_id, v_lport->nport_id, v_sid, + v_xchg->ox_id); + + UNF_SERVICE_COLLECT(v_lport->link_service_info, + UNF_SERVICE_ITEM_RSCN); + + /* 1. Get R_Port by S_ID */ + rport = unf_get_rport_by_nport_id(v_lport, v_sid); // rport busy_list + if (!rport) { + rport = unf_rport_get_free_and_init(v_lport, + UNF_PORT_TYPE_FC, v_sid); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x_0x%x) received RSCN but has no RPort(0x%x) with OX_ID(0x%x)", + v_lport->port_id, v_lport->nport_id, + v_sid, v_xchg->ox_id); + + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + rport->nport_id = v_sid; + } + + rscn_pld = + v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->rscn.rscn_pld; + UNF_CHECK_VALID(0x3485, UNF_TRUE, NULL != rscn_pld, + return UNF_RETURN_ERROR); + pld_len = UNF_GET_RSCN_PLD_LEN(rscn_pld->cmnd); + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, rscn_pld, pld_len); + + /* 2. 
NOTE: Analysis RSCN payload(save & disc if necessary) */ + ret = unf_analysis_rscn_payload(v_lport, rscn_pld); + if (ret != RETURN_OK) + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) analysis RSCN failed", + v_lport->port_id, v_lport->nport_id); + + /* 3. send rscn_acc after analysis payload */ + ret = unf_send_rscn_acc(v_lport, rport, v_xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) send RSCN response failed", + v_lport->port_id, v_lport->nport_id); + + return UNF_RETURN_ERROR; + } + + UNF_REFERNCE_VAR(pld_len); + return ret; +} + +static void unf_analysis_pdisc_pld(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_plogi_pdisc_s *v_pdisc) +{ + struct unf_lgn_parms_s *pdisc_params = NULL; + unsigned long long wwpn = INVALID_VALUE64; + unsigned long long wwnn = INVALID_VALUE64; + + UNF_CHECK_VALID(0x3486, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3487, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3488, UNF_TRUE, v_pdisc, return); + UNF_REFERNCE_VAR(v_lport); + + pdisc_params = &v_pdisc->payload.parms; + if (pdisc_params->co_parms.bb_receive_data_field_size > + UNF_MAX_FRAME_SIZE) + v_rport->max_frame_size = UNF_MAX_FRAME_SIZE; // 2112 + else + v_rport->max_frame_size = + pdisc_params->co_parms.bb_receive_data_field_size; + + wwnn = (unsigned long long) + (((unsigned long long) + (pdisc_params->high_node_name) << 32) | + ((unsigned long long)pdisc_params->low_node_name)); + wwpn = (unsigned long long) + (((unsigned long long)(pdisc_params->high_port_name) << 32) | + ((unsigned long long)pdisc_params->low_port_name)); + + v_rport->port_name = wwpn; + v_rport->node_name = wwnn; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) save PDISC parameters to Rport(0x%x) WWPN(0x%llx) WWNN(0x%llx)", + v_lport->port_id, v_rport->nport_id, + v_rport->port_name, v_rport->node_name); +} + +static unsigned int unf_send_pdisc_rjt(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct unf_rjt_info_s rjt_info; + + UNF_CHECK_VALID(0x3432, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3433, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3434, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + memset(&rjt_info, 0, sizeof(struct unf_rjt_info_s)); + rjt_info.els_cmnd_code = ELS_PDISC; + rjt_info.reason_code = UNF_LS_RJT_LOGICAL_ERROR; + rjt_info.reason_explanation = UNF_LS_RJT_NO_ADDITIONAL_INFO; + + ret = unf_send_els_rjt_by_rport(v_lport, v_xchg, v_rport, &rjt_info); + + return ret; +} + +static unsigned int unf_pdisc_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg) +{ + struct unf_plogi_pdisc_s *pdisc = NULL; + struct unf_rport_s *rport = NULL; + unsigned long flags = 0; + unsigned int ret = RETURN_OK; + unsigned long long wwpn = 0; + + UNF_CHECK_VALID(0x3489, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3490, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Receive PDISC. 
Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", + v_lport->port_id, v_sid, v_xchg->ox_id); + + UNF_SERVICE_COLLECT(v_lport->link_service_info, UNF_SERVICE_ITEM_PDISC); + pdisc = &v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->pdisc; + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, + &pdisc->payload, + sizeof(struct unf_plogi_payload_s)); + wwpn = (unsigned long long) + (((unsigned long long) + (pdisc->payload.parms.high_port_name) << 32) | + ((unsigned long long)pdisc->payload.parms.low_port_name)); + + rport = unf_find_rport(v_lport, v_sid, wwpn); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) can't find RPort by NPort ID(0x%x). Free exchange and send LOGO", + v_lport->port_id, v_sid); + + unf_cm_free_xchg(v_lport, v_xchg); + (void)unf_send_logo_by_did(v_lport, v_sid); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MINOR, + "[info]Port(0x%x) get exist RPort(0x%x) when receive PDISC with S_Id(0x%x)", + v_lport->port_id, rport->nport_id, v_sid); + + if (v_sid >= UNF_FC_FID_DOM_MGR) + return unf_send_pdisc_rjt(v_lport, rport, v_xchg); + + unf_analysis_pdisc_pld(v_lport, rport, pdisc); + + /* State: READY */ + spin_lock_irqsave(&rport->rport_state_lock, flags); + if (rport->rp_state == UNF_RPORT_ST_READY) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port(0x%x) find RPort(0x%x) state is READY when receiving PDISC", + v_lport->port_id, v_sid); + + spin_unlock_irqrestore(&rport->rport_state_lock, + flags); + + ret = unf_send_pdisc_acc(v_lport, rport, v_xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) handle PDISC failed", + v_lport->port_id); + + return ret; + } + + /* Report Down/Up event to scsi */ + unf_update_lport_state_by_linkup_event(v_lport, rport, + rport->options); + } + /* State: Closing */ + else if ((rport->rp_state == UNF_RPORT_ST_CLOSING) && + (rport->session)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) find RPort(0x%x) state is 0x%x when receiving PDISC", + v_lport->port_id, v_sid, rport->rp_state); + + spin_unlock_irqrestore(&rport->rport_state_lock, + flags); + + unf_cm_free_xchg(v_lport, v_xchg); + (void)unf_send_logo_by_did(v_lport, v_sid); + } + /* State: PRLI_WAIT */ + else if (rport->rp_state == UNF_RPORT_ST_PRLI_WAIT) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port(0x%x) find RPort(0x%x) state is 0x%x when receiving PDISC", + v_lport->port_id, v_sid, rport->rp_state); + + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + ret = unf_send_pdisc_acc(v_lport, rport, v_xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) handle PDISC failed", + v_lport->port_id); + + return ret; + } + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) find RPort(0x%x) state is 0x%x when receiving PDISC, send LOGO", + v_lport->port_id, v_sid, rport->rp_state); + + unf_rport_state_ma(rport, UNF_EVENT_RPORT_LOGO); + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + unf_rport_enter_logo(v_lport, rport); + unf_cm_free_xchg(v_lport, v_xchg); + } + } + + return ret; +} + +static void unf_analysis_adisc_pld(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_adisc_payload_s *v_adisc_pld) +{ + unsigned long long wwpn = INVALID_VALUE64; + unsigned long long wwnn = INVALID_VALUE64; + + 
UNF_CHECK_VALID(0x3491, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3492, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3493, UNF_TRUE, v_adisc_pld, return); + UNF_REFERNCE_VAR(v_lport); + + wwnn = (unsigned long long) + (((unsigned long long)(v_adisc_pld->high_node_name) << 32) | + ((unsigned long long)v_adisc_pld->low_node_name)); + wwpn = (unsigned long long) + (((unsigned long long)(v_adisc_pld->high_port_name) << 32) | + ((unsigned long long)v_adisc_pld->low_port_name)); + + v_rport->port_name = wwpn; + v_rport->node_name = wwnn; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) save ADISC parameters to RPort(0x%x), WWPN(0x%llx) WWNN(0x%llx) NPort ID(0x%x)", + v_lport->port_id, v_rport->nport_id, + v_rport->port_name, v_rport->node_name, + v_adisc_pld->nport_id); +} + +static unsigned int unf_adisc_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg) +{ + struct unf_rport_s *rport = NULL; + struct unf_adisc_payload_s *adisc_pld = NULL; + unsigned long flags = 0; + unsigned long long wwpn = 0; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3494, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3495, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Receive ADISC. Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", + v_lport->port_id, v_sid, v_xchg->ox_id); + + UNF_SERVICE_COLLECT(v_lport->link_service_info, + UNF_SERVICE_ITEM_ADISC); + adisc_pld = &v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->adisc.adisc_payl; + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, adisc_pld, + sizeof(struct unf_adisc_payload_s)); + wwpn = (unsigned long long) + (((unsigned long long)(adisc_pld->high_port_name) << 32) | + ((unsigned long long)adisc_pld->low_port_name)); + + rport = unf_find_rport(v_lport, v_sid, wwpn); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) can't find RPort by NPort ID(0x%x). 
Free exchange and send LOGO", + v_lport->port_id, v_sid); + + unf_cm_free_xchg(v_lport, v_xchg); + (void)unf_send_logo_by_did(v_lport, v_sid); + + return ret; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MINOR, + "[info]Port(0x%x) get exist RPort(0x%x) when receive ADISC with S_ID(0x%x)", + v_lport->port_id, rport->nport_id, v_sid); + + unf_analysis_adisc_pld(v_lport, rport, adisc_pld); + + /* State: READY */ + spin_lock_irqsave(&rport->rport_state_lock, flags); + if (rport->rp_state == UNF_RPORT_ST_READY) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port(0x%x) find RPort(0x%x) state is READY when receiving ADISC", + v_lport->port_id, v_sid); + + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + /* Return ACC directly */ + ret = unf_send_adisc_acc(v_lport, rport, v_xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) send ADISC ACC failed", + v_lport->port_id); + + return ret; + } + + /* Report Down/Up event to SCSI */ + unf_update_lport_state_by_linkup_event(v_lport, rport, + rport->options); + } + /* State: Closing */ + else if ((rport->rp_state == UNF_RPORT_ST_CLOSING) && + (rport->session)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) find RPort(0x%x) state is 0x%x when receiving ADISC", + v_lport->port_id, v_sid, rport->rp_state); + + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + rport = unf_get_safe_rport(v_lport, rport, + UNF_RPORT_REUSE_RECOVER, + rport->nport_id); + if (rport) { + spin_lock_irqsave(&rport->rport_state_lock, flags); + rport->nport_id = v_sid; + spin_unlock_irqrestore(&rport->rport_state_lock, + flags); + + ret = unf_send_adisc_acc(v_lport, rport, v_xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) send ADISC ACC failed", + v_lport->port_id); + + return ret; + } + + unf_update_lport_state_by_linkup_event(v_lport, rport, + rport->options); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) can't find RPort by NPort_ID(0x%x). 
Free exchange and send LOGO", + v_lport->port_id, v_sid); + + unf_cm_free_xchg(v_lport, v_xchg); + (void)unf_send_logo_by_did(v_lport, v_sid); + } + } + /* State: PRLI_WAIT */ + else if (rport->rp_state == UNF_RPORT_ST_PRLI_WAIT) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) find RPort(0x%x) state is 0x%x when receiving ADISC", + v_lport->port_id, v_sid, rport->rp_state); + + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + ret = unf_send_adisc_acc(v_lport, rport, v_xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) send ADISC ACC failed", + v_lport->port_id); + + return ret; + } + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) find RPort(0x%x) state is 0x%x when receiving ADISC, send LOGO", + v_lport->port_id, v_sid, rport->rp_state); + + unf_rport_state_ma(rport, UNF_EVENT_RPORT_LOGO); + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + unf_rport_enter_logo(v_lport, rport); + unf_cm_free_xchg(v_lport, v_xchg); + } + + return ret; +} + +static unsigned int unf_rec_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg) +{ + struct unf_rport_s *rport = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3496, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3497, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + UNF_REFERNCE_VAR(v_sid); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Port(0x%x) receive REC", v_lport->port_id); + + /* Send rec acc */ + ret = unf_send_rec_acc(v_lport, rport, v_xchg); // discard directly + + return ret; +} + +static unsigned int unf_rrq_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg) +{ + struct unf_rport_s *rport = NULL; + struct unf_rrq_s *rrq = NULL; + struct unf_xchg_s *xchg_reused = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + unsigned int sid = 0; + unsigned long flags = 0; + struct unf_rjt_info_s rjt_info = { 0 }; + struct unf_xchg_hot_pool_s *hot_pool = NULL; + + UNF_CHECK_VALID(0x3498, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3499, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + UNF_REFERNCE_VAR(rx_id); + + UNF_SERVICE_COLLECT(v_lport->link_service_info, UNF_SERVICE_ITEM_RRQ); + rrq = &v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->rrq; + ox_id = (unsigned short)(rrq->oxid_rxid >> 16); + rx_id = (unsigned short)(rrq->oxid_rxid); + sid = rrq->sid & UNF_NPORTID_MASK; + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_KEVENT, + "[warn]Receive RRQ. 
Port(0x%x)<---RPort(0x%x) sfsXchg(0x%p) OX_ID(0x%x,0x%x) RX_ID(0x%x)", + v_lport->port_id, v_sid, v_xchg, + ox_id, v_xchg->ox_id, rx_id); + + /* Get R_Port */ + rport = unf_get_rport_by_nport_id(v_lport, v_sid); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) receive RRQ but has no RPort(0x%x)", + v_lport->port_id, v_sid); + + /* NOTE: send LOGO */ + ret = unf_send_logo_by_did(v_lport, sid); + + unf_cm_free_xchg(v_lport, v_xchg); + return ret; + } + + /* Get Target (Abort I/O) exchange context */ + /* UNF_FindXchgByOxId */ + xchg_reused = unf_cm_lookup_xchg_by_id(v_lport, ox_id, sid); + if (!xchg_reused) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) cannot find exchange with OX_ID(0x%x) RX_ID(0x%x) S_ID(0x%x)", + v_lport->port_id, ox_id, rx_id, sid); + + rjt_info.els_cmnd_code = ELS_RRQ; + rjt_info.reason_code = FCXLS_BA_RJT_LOGICAL_ERROR | + FCXLS_LS_RJT_INVALID_OXID_RXID; + + /* NOTE: send ELS RJT */ + if (unf_send_els_rjt_by_rport(v_lport, v_xchg, + rport, &rjt_info) != + RETURN_OK) { + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + return RETURN_OK; + } + + hot_pool = xchg_reused->hot_pool; + if (unlikely(!hot_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "Port(0x%x) OxId(0x%x) Rxid(0x%x) Sid(0x%x) Hot Pool is NULL.", + v_lport->port_id, ox_id, rx_id, sid); + + return ret; + } + + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags); + xchg_reused->ox_id = INVALID_VALUE16; + xchg_reused->rx_id = INVALID_VALUE16; + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags); + + /* NOTE: release I/O exchange context */ + unf_xchg_ref_dec(xchg_reused, SFS_RESPONSE); + + /* Send RRQ ACC */ + ret = unf_send_rrq_acc(v_lport, rport, v_xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) can not send RRQ rsp. 
Xchg(0x%p) Ioxchg(0x%p) OX_RX_ID(0x%x 0x%x) S_ID(0x%x)", + v_lport->port_id, v_xchg, + xchg_reused, ox_id, rx_id, sid); + + unf_cm_free_xchg(v_lport, v_xchg); + return ret; + } + + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static unsigned int unf_send_els_rjt_by_rport(struct unf_lport_s *v_lport, + struct unf_xchg_s *v_xchg, + struct unf_rport_s *v_rport, + struct unf_rjt_info_s *v_rjt_info) +{ + struct unf_els_rjt_s *els_rjt = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = v_xchg; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + + UNF_CHECK_VALID(0x3500, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3501, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3502, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + xchg->cmnd_code = UNF_SET_ELS_RJT_TYPE(v_rjt_info->els_cmnd_code); + xchg->did = v_rport->nport_id; + xchg->sid = v_lport->nport_id; + xchg->oid = xchg->sid; + xchg->lport = v_lport; + xchg->rport = v_rport; + xchg->disc_rport = NULL; + + xchg->pfn_callback = NULL; + xchg->pfn_ob_callback = NULL; + + unf_fill_package(&pkg, xchg, v_rport); + + fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, xchg); + return UNF_RETURN_ERROR; + } + + els_rjt = &fc_entry->els_rjt; + memset(els_rjt, 0, sizeof(struct unf_els_rjt_s)); + unf_fill_rjt_pld(els_rjt, v_rjt_info->reason_code, + v_rjt_info->reason_explanation); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Send LS_RJT for 0x%x %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", + v_rjt_info->els_cmnd_code, + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id, rx_id); + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + + return ret; +} + +static unsigned int unf_els_cmnd_default_handler(struct unf_lport_s *v_lport, + struct unf_xchg_s *v_xchg, + unsigned int v_sid, + unsigned int v_els_cmnd_code) +{ +#define ELS_LCB 0X81 +#define ELS_RDP 0X18 + + struct unf_rport_s *rport = NULL; + struct unf_rjt_info_s rjt_info = { 0 }; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3505, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3506, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + if ((v_els_cmnd_code != ELS_LCB) && (v_els_cmnd_code != ELS_RDP)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_ABNORMAL, UNF_KEVENT, + "[info]Receive Unknown ELS command(0x%x). 
Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", + v_els_cmnd_code, v_lport->port_id, v_sid, + v_xchg->ox_id); + } + + memset(&rjt_info, 0, sizeof(struct unf_rjt_info_s)); + rjt_info.els_cmnd_code = v_els_cmnd_code; + rjt_info.reason_code = UNF_LS_RJT_NOT_SUPPORTED; + + rport = unf_get_rport_by_nport_id(v_lport, v_sid); + if (rport) + ret = unf_send_els_rjt_by_rport(v_lport, v_xchg, rport, + &rjt_info); + else + ret = unf_send_els_rjt_by_did(v_lport, v_xchg, v_sid, + &rjt_info); + + return ret; +} + +static struct unf_xchg_s *unf_alloc_xchg_for_rcv_cmnd( + struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_pkg) +{ + struct unf_xchg_s *xchg = NULL; + unsigned long flags = 0; + unsigned int i = 0; + unsigned int offset = 0; + unsigned char *cmnd_pld = NULL; + unsigned int first_dword = 0; + unsigned int alloc_time = 0; + + UNF_CHECK_VALID(0x3508, UNF_TRUE, v_lport, return NULL); + UNF_CHECK_VALID(0x3509, UNF_TRUE, v_pkg, return NULL); + + if (!v_pkg->xchg_contex) { + xchg = unf_cm_get_free_xchg(v_lport, UNF_XCHG_TYPE_SFS); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[warn]Port(0x%x) get new exchange failed", + v_lport->port_id); + + return NULL; + } + + offset = (xchg->fcp_sfs_union.sfs_entry.cur_offset); + cmnd_pld = (unsigned char *)xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->rscn.rscn_pld; + first_dword = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->sfs_common.frame_head.rctl_did; + + if ((cmnd_pld) || (first_dword != 0) || (offset != 0)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) exchange(0x%p) abnormal, maybe data overrun, start(%llu) command(0x%x)", + v_lport->port_id, xchg, + xchg->alloc_jif, v_pkg->cmnd); + + UNF_PRINT_SFS(UNF_INFO, v_lport->port_id, + xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr, + sizeof(union unf_sfs_u)); + } + + memset(xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr, 0, + sizeof(union unf_sfs_u)); + + v_pkg->xchg_contex = (void *)xchg; + + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + xchg->fcp_sfs_union.sfs_entry.cur_offset = 0; + alloc_time = xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]; + for (i = 0; i < PKG_MAX_PRIVATE_DATA_SIZE; i++) + xchg->private[i] = v_pkg->private[i]; + + xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] = alloc_time; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + } else { + xchg = (struct unf_xchg_s *)v_pkg->xchg_contex; + } + + if (!xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) { + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + return NULL; + } + + return xchg; +} + +static unsigned char *unf_calc_big_cmnd_pld_buffer(struct unf_xchg_s *v_xchg, + unsigned int v_cmnd_code) +{ + unsigned char *cmnd_pld = NULL; + void *buf = NULL; + unsigned char *dest = NULL; + + UNF_CHECK_VALID(0x3510, UNF_TRUE, v_xchg, return NULL); + + if (v_cmnd_code == ELS_RSCN) + cmnd_pld = (unsigned char *)v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->rscn.rscn_pld; + else + cmnd_pld = (unsigned char *)v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo.echo_pld; + + if (!cmnd_pld) { + buf = unf_get_one_big_sfs_buf(v_xchg); + if (!buf) + return NULL; + + if (v_cmnd_code == ELS_RSCN) { + memset(buf, 0, sizeof(struct unf_rscn_pld_s)); + v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->rscn.rscn_pld = buf; + } else { + memset(buf, 0, sizeof(struct unf_echo_payload_s)); + v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo.echo_pld = buf; + } + + dest = (unsigned char *)buf; + } else { + dest = (unsigned char *) + (cmnd_pld + 
v_xchg->fcp_sfs_union.sfs_entry.cur_offset); + } + + return dest; +} + +static unsigned char *unf_calc_other_pld_buffer(struct unf_xchg_s *v_xchg) +{ + unsigned char *dest = NULL; + unsigned int offset = 0; + + UNF_CHECK_VALID(0x3511, UNF_TRUE, v_xchg, return NULL); + + offset = (sizeof(struct unf_fchead_s)) + + (v_xchg->fcp_sfs_union.sfs_entry.cur_offset); + dest = (unsigned char *) + ((unsigned char *) + (v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) + + offset); + + return dest; +} + +static struct unf_xchg_s *unf_mv_data_2_xchg(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_pkg) +{ + struct unf_xchg_s *xchg = NULL; + unsigned char *dest = NULL; + unsigned int length = 0; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x3512, UNF_TRUE, v_lport, return NULL); + UNF_CHECK_VALID(0x3513, UNF_TRUE, v_pkg, return NULL); + + xchg = unf_alloc_xchg_for_rcv_cmnd(v_lport, v_pkg); + if (!xchg) + return NULL; + + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + + memcpy(&xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->sfs_common.frame_head, + &v_pkg->frame_head, + sizeof(v_pkg->frame_head)); + + if ((v_pkg->cmnd == ELS_RSCN) || (v_pkg->cmnd == ELS_ECHO)) + dest = unf_calc_big_cmnd_pld_buffer(xchg, v_pkg->cmnd); + else + dest = unf_calc_other_pld_buffer(xchg); + + if (!dest) { + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + return NULL; + } + + if (((xchg->fcp_sfs_union.sfs_entry.cur_offset + + v_pkg->unf_cmnd_pload_bl.length) > + (unsigned int)sizeof(union unf_sfs_u)) && + (v_pkg->cmnd != ELS_RSCN) && + (v_pkg->cmnd != ELS_ECHO)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) excange(0x%p) command(0x%x,0x%x) copy payload overrun(0x%x:0x%x:0x%x)", + v_lport->port_id, xchg, v_pkg->cmnd, + xchg->hot_pool_tag, + xchg->fcp_sfs_union.sfs_entry.cur_offset, + v_pkg->unf_cmnd_pload_bl.length, + (unsigned int)sizeof(union unf_sfs_u)); + + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + return NULL; + } + + length = v_pkg->unf_cmnd_pload_bl.length; + if (length > 0) + memcpy(dest, v_pkg->unf_cmnd_pload_bl.buffer_ptr, length); + + xchg->fcp_sfs_union.sfs_entry.cur_offset += length; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + return xchg; +} + +static unsigned int unf_logo_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg) +{ + struct unf_rport_s *rport = NULL; + struct unf_rport_s *logo_rport = NULL; + struct unf_logo_s *logo = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int nport_id = 0; + struct unf_rjt_info_s rjt_info = { 0 }; + + UNF_REFERNCE_VAR(logo); + UNF_CHECK_VALID(0x3514, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3515, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + UNF_SERVICE_COLLECT(v_lport->link_service_info, UNF_SERVICE_ITEM_LOGO); + logo = &v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->logo; + nport_id = logo->payload.nport_id & UNF_NPORTID_MASK; + + if (v_sid < UNF_FC_FID_DOM_MGR) { + /* R_Port is not fabric port */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_KEVENT, + "[info]LOGIN: Receive LOGO. Port(0x%x)<---RPort(0x%x) NPort_ID(0x%x) OXID(0x%x)", + v_lport->port_id, v_sid, nport_id, v_xchg->ox_id); + } + + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, &logo->payload, + sizeof(struct unf_logo_payload_s)); + + /* + * 1. 
S_ID unequal to NPort_ID: + * link down Rport find by NPort_ID immediately + */ + if (nport_id != v_sid) { + logo_rport = unf_get_rport_by_nport_id(v_lport, nport_id); + if (logo_rport) + unf_rport_immediate_linkdown(v_lport, logo_rport); + } + + /* 2. Get R_Port by S_ID (frame header) */ + rport = unf_get_rport_by_nport_id(v_lport, v_sid); + rport = unf_get_safe_rport(v_lport, rport, UNF_RPORT_REUSE_INIT, + v_sid); // INIT + if (!rport) { + memset(&rjt_info, 0, sizeof(struct unf_rjt_info_s)); + rjt_info.els_cmnd_code = ELS_LOGO; + rjt_info.reason_code = UNF_LS_RJT_LOGICAL_ERROR; + rjt_info.reason_explanation = UNF_LS_RJT_NO_ADDITIONAL_INFO; + ret = unf_send_els_rjt_by_did(v_lport, v_xchg, v_sid, + &rjt_info); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) receive LOGO but has no RPort(0x%x)", + v_lport->port_id, v_sid); + + return ret; + } + + /* + * 3. I/O resource release: set ABORT tag + * + * Call by: R_Port remove; RCVD LOGO; RCVD PLOGI; send PLOGI ACC + */ + unf_cm_xchg_mgr_abort_io_by_id(v_lport, rport, v_sid, v_lport->nport_id, + INI_IO_STATE_LOGO); + + /* 4. Send LOGO ACC */ + ret = unf_send_logo_acc(v_lport, rport, v_xchg); + if (ret != RETURN_OK) + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) send LOGO failed", + v_lport->port_id); + + /* + * 5. Do same operations with RCVD LOGO/PRLO & Send LOGO: + * retry (LOGIN or LOGO) or link down immediately + */ + unf_process_rport_after_logo(v_lport, rport); + + return ret; +} + +static unsigned int unf_prlo_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg) +{ + struct unf_rport_s *rport = NULL; + struct unf_prli_prlo_s *prlo = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_REFERNCE_VAR(prlo); + UNF_CHECK_VALID(0x3516, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3517, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Receive PRLO. 
Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", + v_lport->port_id, v_sid, v_xchg->ox_id); + + UNF_SERVICE_COLLECT(v_lport->link_service_info, UNF_SERVICE_ITEM_LOGO); + + /* Get (new) R_Port */ + rport = unf_get_rport_by_nport_id(v_lport, v_sid); + rport = unf_get_safe_rport(v_lport, rport, + UNF_RPORT_REUSE_INIT, v_sid); /* INIT */ + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) receive PRLO but has no RPort", + v_lport->port_id); + + /* Discard directly */ + unf_cm_free_xchg(v_lport, v_xchg); + return ret; + } + + prlo = &v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->prlo; + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, &prlo->payload, + sizeof(struct unf_pril_payload_s)); + + /* Send PRLO ACC to remote */ + ret = unf_send_prlo_acc(v_lport, rport, v_xchg); + if (ret != RETURN_OK) + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) send PRLO ACC failed", + v_lport->port_id); + + /* Enter Enhanced action after LOGO (retry LOGIN or LOGO) */ + unf_process_rport_after_logo(v_lport, rport); + + UNF_REFERNCE_VAR(prlo); + return ret; +} + +static void unf_fill_echo_acc_pld(struct unf_echo_s *v_echo_acc) +{ + struct unf_echo_payload_s *echo_acc_pld = NULL; + + UNF_CHECK_VALID(0x3518, UNF_TRUE, v_echo_acc, return); + + echo_acc_pld = v_echo_acc->echo_pld; + UNF_CHECK_VALID(0x3519, UNF_TRUE, echo_acc_pld, return); + + echo_acc_pld->cmnd = UNF_ELS_CMND_ACC; +} + +static void unf_echo_acc_callback(struct unf_xchg_s *v_xchg) +{ + struct unf_lport_s *lport; + + UNF_CHECK_VALID(0x3517, UNF_TRUE, v_xchg, return); + + lport = v_xchg->lport; + + UNF_CHECK_VALID(0x3517, UNF_TRUE, lport, return); + if (v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo_acc.phy_echo_addr) { + pci_unmap_single( + lport->low_level_func.dev, + v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo_acc.phy_echo_addr, + UNF_ECHO_PAYLOAD_LEN, + DMA_BIDIRECTIONAL); + v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo_acc.phy_echo_addr = 0; + } +} + +static unsigned int unf_send_echo_acc(struct unf_lport_s *v_lport, + unsigned int v_did, + struct unf_xchg_s *v_xchg) +{ + struct unf_echo_s *echo_acc = NULL; + union unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + struct unf_frame_pkg_s pkg; + dma_addr_t phy_echo_acc_addr; + struct unf_rjt_info_s rjt_info = { 0 }; + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + + UNF_CHECK_VALID(0x3520, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3521, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + v_xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_ECHO); + v_xchg->did = v_did; + v_xchg->sid = v_lport->nport_id; + v_xchg->oid = v_xchg->sid; + v_xchg->lport = v_lport; + + v_xchg->pfn_callback = NULL; + v_xchg->pfn_ob_callback = unf_echo_acc_callback; + + unf_fill_package(&pkg, v_xchg, v_xchg->rport); + + fc_entry = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, v_xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + echo_acc = &fc_entry->echo_acc; + unf_fill_echo_acc_pld(echo_acc); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + phy_echo_acc_addr = pci_map_single(v_lport->low_level_func.dev, + echo_acc->echo_pld, + UNF_ECHO_PAYLOAD_LEN, + 
DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(v_lport->low_level_func.dev, + phy_echo_acc_addr)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) pci map err", + v_lport->port_id); + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + echo_acc->phy_echo_addr = phy_echo_acc_addr; + + ret = unf_els_cmnd_send(v_lport, &pkg, v_xchg); + if (ret != RETURN_OK) { + pci_unmap_single(v_lport->low_level_func.dev, + phy_echo_acc_addr, + UNF_ECHO_PAYLOAD_LEN, + DMA_BIDIRECTIONAL); + echo_acc->phy_echo_addr = 0; + if (ret == UNF_RETURN_NOT_SUPPORT) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_KEVENT, + "[info]Port(0x%x) send ECHO reject to RPort(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", + v_lport->port_id, v_did, ox_id, rx_id); + + rjt_info.els_cmnd_code = ELS_ECHO; + rjt_info.reason_code = UNF_LS_RJT_NOT_SUPPORTED; + unf_send_els_rjt_by_rport(v_lport, v_xchg, + v_xchg->rport, + &rjt_info); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) send ECHO ACC to RPort(0x%x) with OX_ID(0x%x) RX_ID(0x%x) failed", + v_lport->port_id, v_did, ox_id, rx_id); + + unf_cm_free_xchg((void *)v_lport, (void *)v_xchg); + } + } + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static unsigned int unf_echo_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg) +{ + struct unf_echo_payload_s *echo_pld = NULL; + struct unf_rport_s *rport = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int data_len = 0; + + UNF_CHECK_VALID(0x3522, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3523, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + data_len = v_xchg->fcp_sfs_union.sfs_entry.cur_offset; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Receive ECHO. 
Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x))", + v_lport->port_id, v_sid, v_xchg->ox_id); + + UNF_SERVICE_COLLECT(v_lport->link_service_info, UNF_SERVICE_ITEM_ECHO); + echo_pld = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo.echo_pld; + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, echo_pld, data_len); + rport = unf_get_rport_by_nport_id(v_lport, v_sid); + v_xchg->rport = rport; + + ret = unf_send_echo_acc(v_lport, v_sid, v_xchg); + if (ret != RETURN_OK) + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) send ECHO ACC failed", + v_lport->port_id); + + UNF_REFERNCE_VAR(echo_pld); + UNF_REFERNCE_VAR(data_len); + return ret; +} + +static unsigned int unf_check_els_cmnd_valid(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_fra_pkg, + struct unf_xchg_s *v_xchg) +{ + struct unf_lport_s *lport = v_lport; + struct unf_frame_pkg_s *ppkg = v_fra_pkg; + struct unf_xchg_s *xchg = v_xchg; + struct unf_rjt_info_s rjt_info = { 0 }; + struct unf_lport_s *vport = NULL; + unsigned int sid = 0; + unsigned int did = 0; + + sid = (ppkg->frame_head.csctl_sid) & UNF_NPORTID_MASK; + did = (ppkg->frame_head.rctl_did) & UNF_NPORTID_MASK; + + memset(&rjt_info, 0, sizeof(struct unf_rjt_info_s)); + rjt_info.reason_code = UNF_LS_RJT_NOT_SUPPORTED; + + if ((ppkg->cmnd == ELS_FLOGI) && + (lport->en_act_topo == UNF_ACT_TOP_PRIVATE_LOOP)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) receive FLOGI in top (0x%x) and send LS_RJT", + lport->port_id, lport->en_act_topo); + + rjt_info.els_cmnd_code = ELS_FLOGI; + (void)unf_send_els_rjt_by_did(lport, xchg, sid, &rjt_info); + + return UNF_RETURN_ERROR; + } + + if ((ppkg->cmnd == ELS_PLOGI) && (did >= UNF_FC_FID_DOM_MGR)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x)receive PLOGI with wellknown address(0x%x) and Send LS_RJT", + lport->port_id, did); + + rjt_info.els_cmnd_code = ELS_PLOGI; + (void)unf_send_els_rjt_by_did(lport, xchg, sid, &rjt_info); + + return UNF_RETURN_ERROR; + } + + if (((lport->nport_id == 0) || + (lport->nport_id == INVALID_VALUE32)) && + (NEED_REFRESH_NPORTID(ppkg))) { + lport->nport_id = did; + } else if ((did != lport->nport_id) && (ppkg->cmnd != ELS_FLOGI)) { + vport = unf_cm_lookup_vport_by_did(lport, did); + if (!vport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) receive ELS cmd(0x%x) with abnormal D_ID(0x%x)", + lport->nport_id, ppkg->cmnd, did); + + unf_cm_free_xchg(lport, xchg); + return UNF_RETURN_ERROR; + } + } + + return RETURN_OK; +} + +static unsigned int unf_rcv_els_cmnd_req(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_fra_pkg) +{ + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int i = 0; + unsigned int sid = 0; + unsigned int did = 0; + struct unf_lport_s *vport = NULL; + unsigned int (*pfn_els_cmnd_handler)(struct unf_lport_s *, unsigned int, + struct unf_xchg_s *) = NULL; + + sid = (v_fra_pkg->frame_head.csctl_sid) & UNF_NPORTID_MASK; + did = (v_fra_pkg->frame_head.rctl_did) & UNF_NPORTID_MASK; + + xchg = unf_mv_data_2_xchg(v_lport, v_fra_pkg); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) receive ElsCmnd(0x%x), exchange is NULL", + v_lport->port_id, v_fra_pkg->cmnd); + + return UNF_RETURN_ERROR; + } + + if (v_fra_pkg->last_pkg_flag != UNF_TRUE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Exchange(%u) waiting for last WQE", + 
xchg->hot_pool_tag); + + return RETURN_OK; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Exchange(%u) get last WQE", xchg->hot_pool_tag); + + if (v_lport->low_level_func.xchg_mgr_type == + UNF_LOW_LEVEL_MGR_TYPE_PASSTIVE) { + xchg->ox_id = UNF_GET_OXID(v_fra_pkg); + xchg->abort_oxid = xchg->ox_id; + xchg->rx_id = xchg->hot_pool_tag; + } + xchg->cmnd_code = v_fra_pkg->cmnd; + + ret = unf_check_els_cmnd_valid(v_lport, v_fra_pkg, xchg); + if (ret != RETURN_OK) { + /* NOTE: exchange has been released */ + return UNF_RETURN_ERROR; + } + + if ((did != v_lport->nport_id) && (v_fra_pkg->cmnd != ELS_FLOGI)) { + vport = unf_cm_lookup_vport_by_did(v_lport, did); + if (!vport) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) received unknown ELS command with S_ID(0x%x) D_ID(0x%x))", + v_lport->port_id, sid, did); + + return UNF_RETURN_ERROR; + } + v_lport = vport; + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_INFO, + "[info]VPort(0x%x) received ELS command with S_ID(0x%x) D_ID(0x%x)", + v_lport->port_id, sid, did); + } + + do { + if ((v_fra_pkg->cmnd) == els_handle[i].cmnd) { + pfn_els_cmnd_handler = + els_handle[i].pfn_els_cmnd_handler; + break; + } + + i++; + } while (i < (sizeof(els_handle) / + sizeof(struct unf_els_handler_table))); + + if (pfn_els_cmnd_handler) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_INFO, + "[info]Port(0x%x) receive ELS(0x%x) from RPort(0x%x) and process it", + v_lport->port_id, v_fra_pkg->cmnd, sid); + + ret = pfn_els_cmnd_handler(v_lport, sid, xchg); + } else { + ret = unf_els_cmnd_default_handler(v_lport, xchg, sid, + v_fra_pkg->cmnd); + } + + return ret; +} + +static unsigned int unf_send_els_rsp_succ(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_fra_pkg) +{ + struct unf_xchg_s *xchg = NULL; + unsigned int ret = RETURN_OK; + unsigned short hot_pool_tag = 0; + unsigned long flags = 0; + void (*pfn_ob_callback)(struct unf_xchg_s *) = NULL; + + UNF_CHECK_VALID(0x3529, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3530, UNF_TRUE, v_fra_pkg, return UNF_RETURN_ERROR); + + if (!v_lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) lookup exchange by tag function is NULL", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + hot_pool_tag = (unsigned short) + (v_fra_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]); + xchg = (struct unf_xchg_s *) + (v_lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag( + (void *)v_lport, + hot_pool_tag)); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) find exhange by tag(0x%x) failed", + v_lport->port_id, hot_pool_tag); + + return UNF_RETURN_ERROR; + } + + v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)xchg); + + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + if ((xchg->pfn_ob_callback) && + (!(xchg->io_state & TGT_IO_STATE_ABORT))) { + pfn_ob_callback = xchg->pfn_ob_callback; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) with exchange(0x%p) tag(%u) do callback", + v_lport->port_id, xchg, hot_pool_tag); + + pfn_ob_callback(xchg); + } else { + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + } + + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + return ret; +} + +static unsigned char *unf_calc_big_resp_pld_buffer(struct unf_xchg_s *v_xchg, + unsigned int v_cmnd_code) +{ + 
unsigned char *resp_pld = NULL; + unsigned char *dest = NULL; + + UNF_CHECK_VALID(0x3510, UNF_TRUE, v_xchg, return NULL); + + if (v_cmnd_code == ELS_ECHO) + resp_pld = (unsigned char *) + v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo.echo_pld; + else + resp_pld = (unsigned char *) + v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->get_id.gid_rsp.gid_acc_pld; + + if (resp_pld) + dest = (unsigned char *) + (resp_pld + v_xchg->fcp_sfs_union.sfs_entry.cur_offset); + + return dest; +} + +static unsigned char *unf_calc_other_resp_pld_buffer(struct unf_xchg_s *v_xchg) +{ + unsigned char *dest = NULL; + unsigned int offset = 0; + + UNF_CHECK_VALID(0x3511, UNF_TRUE, v_xchg, return NULL); + + offset = v_xchg->fcp_sfs_union.sfs_entry.cur_offset; + dest = (unsigned char *)((unsigned char *) + (v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) + offset); + + return dest; +} + +static unsigned int unf_mv_resp_2_xchg(struct unf_xchg_s *v_xchg, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned char *dest = NULL; + unsigned int length = 0; + unsigned int offset = 0; + unsigned int max_frame_len = 0; + unsigned long flags = 0; + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flags); + + if (UNF_NEED_BIG_RESPONSE_BUFF(v_xchg->cmnd_code)) { + dest = unf_calc_big_resp_pld_buffer(v_xchg, + v_xchg->cmnd_code); + offset = 0; + max_frame_len = sizeof(struct unf_gif_acc_pld_s); + } else if (v_xchg->cmnd_code == NS_GA_NXT || + v_xchg->cmnd_code == NS_GIEL) { + dest = unf_calc_big_resp_pld_buffer(v_xchg, + v_xchg->cmnd_code); + offset = 0; + max_frame_len = + v_xchg->fcp_sfs_union.sfs_entry.sfs_buff_len; + } else { + dest = unf_calc_other_resp_pld_buffer(v_xchg); + offset = sizeof(struct unf_fchead_s); + max_frame_len = sizeof(union unf_sfs_u); + } + + if (!dest) { + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags); + + return UNF_RETURN_ERROR; + } + + if (v_xchg->fcp_sfs_union.sfs_entry.cur_offset == 0) { + v_xchg->fcp_sfs_union.sfs_entry.cur_offset += offset; + dest = dest + offset; + } + + length = v_pkg->unf_cmnd_pload_bl.length; + + if ((v_xchg->fcp_sfs_union.sfs_entry.cur_offset + length) > + max_frame_len) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Exchange(0x%p) command(0x%x) hotpooltag(0x%x) OX_RX_ID(0x%x) S_ID(0x%x) D_ID(0x%x) copy payload overrun(0x%x:0x%x:0x%x)", + v_xchg, v_xchg->cmnd_code, v_xchg->hot_pool_tag, + v_pkg->frame_head.oxid_rxid, + v_pkg->frame_head.csctl_sid & UNF_NPORTID_MASK, + v_pkg->frame_head.rctl_did & UNF_NPORTID_MASK, + v_xchg->fcp_sfs_union.sfs_entry.cur_offset, + v_pkg->unf_cmnd_pload_bl.length, + max_frame_len); + + length = max_frame_len - v_xchg->fcp_sfs_union.sfs_entry.cur_offset; + } + + if (length > 0) + memcpy(dest, v_pkg->unf_cmnd_pload_bl.buffer_ptr, length); + + v_xchg->fcp_sfs_union.sfs_entry.cur_offset += length; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags); + + return RETURN_OK; +} + +static unsigned int unf_send_els_cmnd_succ(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_fra_pkg) +{ + struct unf_xchg_s *xchg = NULL; + unsigned int ret = RETURN_OK; + unsigned short hot_pool_tag = 0; + unsigned long flags = 0; + void (*pfn_callback)(void *, void *, void *) = NULL; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x3531, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3532, UNF_TRUE, v_fra_pkg, return UNF_RETURN_ERROR); + lport = v_lport; + + if (!lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + 
"[err]Port(0x%x) lookup exchange by tag function can't be NULL", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + hot_pool_tag = (unsigned short) + (v_fra_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]); + xchg = (struct unf_xchg_s *) + (lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag((void *)lport, + hot_pool_tag)); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) find exchange by tag(0x%x) failed", + lport->port_id, lport->nport_id, hot_pool_tag); + + return UNF_RETURN_ERROR; + } + + UNF_CHECK_ALLOCTIME_VALID( + lport, hot_pool_tag, xchg, + v_fra_pkg->private[PKG_PRIVATE_XCHG_ALLOC_TIME], + xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]); + + if (((v_fra_pkg->frame_head.csctl_sid) & UNF_NPORTID_MASK) != + xchg->did) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) find exhange invalid, package S_ID(0x%x) exchange S_ID(0x%x) D_ID(0x%x)", + lport->port_id, v_fra_pkg->frame_head.csctl_sid, + xchg->sid, xchg->did); + + return UNF_RETURN_ERROR; + } + + if (v_fra_pkg->last_pkg_flag == UNF_PKG_NOT_LAST_RESPONSE) { + ret = unf_mv_resp_2_xchg(xchg, v_fra_pkg); + + return ret; + } + + xchg->byte_orders = v_fra_pkg->byte_orders; + lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)xchg); + + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + if ((xchg->pfn_callback) && + ((xchg->cmnd_code == ELS_RRQ) || + (xchg->cmnd_code == ELS_LOGO) || + (!(xchg->io_state & TGT_IO_STATE_ABORT)))) { + pfn_callback = xchg->pfn_callback; + + if ((xchg->cmnd_code == ELS_FLOGI) || + (xchg->cmnd_code == ELS_FDISC)) + xchg->sid = v_fra_pkg->frame_head.rctl_did & + UNF_NPORTID_MASK; + + if (xchg->cmnd_code == ELS_ECHO) { + xchg->private[PKG_PRIVATE_ECHO_CMD_RCV_TIME] = + v_fra_pkg->private[PKG_PRIVATE_ECHO_CMD_RCV_TIME]; + xchg->private[PKG_PRIVATE_ECHO_RSP_SND_TIME] = + v_fra_pkg->private[PKG_PRIVATE_ECHO_RSP_SND_TIME]; + xchg->private[PKG_PRIVATE_ECHO_CMD_SND_TIME] = + v_fra_pkg->private[PKG_PRIVATE_ECHO_CMD_SND_TIME]; + xchg->private[PKG_PRIVATE_ECHO_ACC_RCV_TIME] = + v_fra_pkg->private[PKG_PRIVATE_ECHO_ACC_RCV_TIME]; + } + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + /* Do callback */ + pfn_callback(xchg->lport, xchg->rport, xchg); + } else { + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + } + + unf_cm_free_xchg((void *)lport, (void *)xchg); + return ret; +} + +static unsigned int unf_send_els_cmnd_failed(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_fra_pkg) +{ + struct unf_xchg_s *xchg = NULL; + unsigned int ret = RETURN_OK; + unsigned short hot_pool_tag = 0; + unsigned long flags = 0; + void (*pfn_ob_callback)(struct unf_xchg_s *) = NULL; + + UNF_CHECK_VALID(0x3533, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3534, UNF_TRUE, v_fra_pkg, return UNF_RETURN_ERROR); + + if (!v_lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) lookup exchange by tag function can't be NULL", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + hot_pool_tag = (unsigned short) + (v_fra_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]); + xchg = (struct unf_xchg_s *) + (v_lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag( + (void *)v_lport, + hot_pool_tag)); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) find exhange by tag(0x%x) failed", + v_lport->port_id, v_lport->nport_id, hot_pool_tag); + + return UNF_RETURN_ERROR; + } + + 
UNF_CHECK_ALLOCTIME_VALID( + v_lport, hot_pool_tag, xchg, + v_fra_pkg->private[PKG_PRIVATE_XCHG_ALLOC_TIME], + xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]); + + v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)xchg); + + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + if ((xchg->pfn_ob_callback) && + ((xchg->cmnd_code == ELS_RRQ) || + (xchg->cmnd_code == ELS_LOGO) || + (!(xchg->io_state & TGT_IO_STATE_ABORT)))) { + pfn_ob_callback = xchg->pfn_ob_callback; + xchg->ob_callback_sts = v_fra_pkg->status; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + pfn_ob_callback(xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) exchange(0x%p) tag(0x%x) do callback", + v_lport->port_id, xchg, hot_pool_tag); + } else { + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + } + + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + return ret; +} + +static unsigned int unf_rcv_els_cmnd_reply(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_fra_pkg) +{ + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3535, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3536, UNF_TRUE, v_fra_pkg, return UNF_RETURN_ERROR); + + if ((v_fra_pkg->status == UNF_IO_SUCCESS) || + (v_fra_pkg->status == UNF_IO_UNDER_FLOW)) + ret = unf_send_els_cmnd_succ(v_lport, v_fra_pkg); + else + ret = unf_send_els_cmnd_failed(v_lport, v_fra_pkg); + + return ret; +} + +void unf_lport_enter_msn_plogi(struct unf_lport_s *v_lport) +{ + /* Fabric or Public Loop Mode: Login with Name server */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = NULL; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_plogi_payload_s *plogi_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_frame_pkg_s pkg; + + UNF_CHECK_VALID(0x1811, UNF_TRUE, v_lport, return); + + /* Get (safe) R_Port */ + rport = unf_rport_get_free_and_init(v_lport, UNF_PORT_TYPE_FC, + UNF_FC_FID_MGMT_SERV); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) allocate RPort failed", + v_lport->port_id); + return; + } + + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = UNF_FC_FID_MGMT_SERV; // 0xfffffa + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + + /* Get & Set new free exchange */ + xchg = unf_cm_get_free_xchg(v_lport, UNF_XCHG_TYPE_SFS); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for PLOGI", + v_lport->port_id); + + return; + } + + xchg->cmnd_code = ELS_PLOGI; // PLOGI + xchg->did = rport->nport_id; + xchg->sid = v_lport->nport_id; + xchg->oid = xchg->sid; + xchg->lport = lport; + xchg->rport = rport; + + if (v_lport->low_level_func.xchg_mgr_type == + UNF_LOW_LEVEL_MGR_TYPE_PASSTIVE) + xchg->ox_id = xchg->hot_pool_tag; + + /* Set callback function */ + xchg->pfn_callback = NULL; // for rcvd plogi acc/rjt processer + xchg->pfn_ob_callback = NULL; // for send plogi failed processer + + unf_fill_package(&pkg, xchg, rport); + + /* Fill PLOGI payload */ + fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, xchg); + return; + } + + plogi_pld = &fc_entry->plogi.payload; + memset(plogi_pld, 0, 
sizeof(struct unf_plogi_payload_s)); + unf_fill_plogi_pld(plogi_pld, v_lport); + + /* Start to Send PLOGI command */ + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); +} + +static void unf_register_to_switch(struct unf_lport_s *v_lport) +{ + /* Register to Fabric, used for: FABRIC & PUBLI LOOP */ + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3542, UNF_TRUE, v_lport, return); + + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + /* LPort: FLOGI_WAIT --> PLOGI_WAIT */ + unf_lport_stat_ma(v_lport, UNF_EVENT_LPORT_REMOTE_ACC); + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + /* Login with Name server: PLOGI */ + unf_lport_enter_sns_plogi(v_lport); + + unf_lport_enter_msn_plogi(v_lport); + + if ((v_lport->root_lport == v_lport) &&/* Physical Port */ + (v_lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC)) { + unf_linkup_all_vports(v_lport); + } +} + +void unf_login_with_loop_node(struct unf_lport_s *v_lport, unsigned int v_alpa) +{ + /* Only used for Private Loop LOGIN */ + struct unf_rport_s *rport = NULL; + unsigned long rport_flag = 0; + unsigned int port_feature = 0; + unsigned int ret; + + /* Check AL_PA validity */ + if (v_lport->nport_id == v_alpa) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) is the same as RPort with AL_PA(0x%x), do nothing", + v_lport->port_id, v_alpa); + return; + } + + if (v_alpa == 0) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) RPort(0x%x) is fabric, do nothing", + v_lport->port_id, v_alpa); + return; + } + + /* Get & set R_Port: reuse only */ + rport = unf_get_rport_by_nport_id(v_lport, v_alpa); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Port(0x%x_0x%x) RPort(0x%x_0x%p) login with private loop", + v_lport->port_id, v_lport->nport_id, v_alpa, rport); + + rport = unf_get_safe_rport(v_lport, rport, UNF_RPORT_REUSE_ONLY, + v_alpa); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) allocate new RPort(0x%x) failed", + v_lport->port_id, v_lport->nport_id, v_alpa); + return; + } + + /* Update R_Port state & N_Port_ID */ + spin_lock_irqsave(&rport->rport_state_lock, rport_flag); + rport->nport_id = v_alpa; + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); // PLOGI_WAIT + spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); + + /* Private Loop: check whether need delay to send PLOGI or not */ + port_feature = rport->options; + + /* check Rport and Lport feature */ + if ((port_feature == UNF_PORT_MODE_UNKNOWN) && + (v_lport->options == UNF_PORT_MODE_INI)) { + /* Start to send PLOGI */ + ret = unf_send_plogi(v_lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) send PLOGI to RPort(0x%x) failed", + v_lport->port_id, v_lport->nport_id, + rport->nport_id); + + unf_rport_error_recovery(rport); + } + } else { + unf_check_rport_need_delay_plogi(v_lport, rport, port_feature); + } +} + +unsigned int unf_receive_els_pkg(void *v_lport, + struct unf_frame_pkg_s *v_fra_pkg) +{ + struct unf_lport_s *lport = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3543, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3544, UNF_TRUE, v_fra_pkg, return UNF_RETURN_ERROR); + lport = (struct unf_lport_s *)v_lport; + + switch (v_fra_pkg->type) { + case UNF_PKG_ELS_REQ_DONE: + ret = 
unf_rcv_els_cmnd_reply(lport, v_fra_pkg); + break; + + case UNF_PKG_ELS_REQ: + ret = unf_rcv_els_cmnd_req(lport, v_fra_pkg); + break; + + default: + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) with exchange type(0x%x) abnormal", + lport->port_id, lport->nport_id, v_fra_pkg->type); + break; + } + + return ret; +} + +unsigned int unf_send_els_done(void *v_lport, struct unf_frame_pkg_s *v_pkg) +{ + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3545, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3546, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR); + + if (v_pkg->type == UNF_PKG_ELS_REPLY_DONE) { + if ((v_pkg->status == UNF_IO_SUCCESS) || + (v_pkg->status == UNF_IO_UNDER_FLOW)) + ret = unf_send_els_rsp_succ(v_lport, v_pkg); + else + ret = unf_send_els_cmnd_failed(v_lport, v_pkg); + } + + return ret; +} + +static unsigned int unf_rcv_gs_cmnd_reply(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_fra_pkg) +{ + struct unf_xchg_s *xchg = NULL; + unsigned long flags = 0; + unsigned short hot_pool_tag = 0; + unsigned int ret = RETURN_OK; + struct unf_lport_s *lport = NULL; + void (*pfn_callback)(void *, void *, void *) = NULL; + + UNF_CHECK_VALID(0x3553, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3554, UNF_TRUE, v_fra_pkg, return UNF_RETURN_ERROR); + lport = v_lport; + hot_pool_tag = (unsigned short) + (v_fra_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]); + + xchg = (struct unf_xchg_s *)unf_cm_lookup_xchg_by_tag( + (void *)lport, hot_pool_tag); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) find exhange by tag(0x%x) failed", + lport->port_id, hot_pool_tag); + + return UNF_RETURN_ERROR; + } + + UNF_CHECK_ALLOCTIME_VALID( + lport, hot_pool_tag, xchg, + v_fra_pkg->private[PKG_PRIVATE_XCHG_ALLOC_TIME], + xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]); + + if (v_fra_pkg->last_pkg_flag == UNF_PKG_NOT_LAST_RESPONSE) { + ret = unf_mv_resp_2_xchg(xchg, v_fra_pkg); + return ret; + } + + lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)xchg); + + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + if ((xchg->pfn_callback) && (!(xchg->io_state & TGT_IO_STATE_ABORT))) { + pfn_callback = xchg->pfn_callback; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + pfn_callback(xchg->lport, xchg->rport, xchg); + } else { + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + } + + unf_cm_free_xchg((void *)lport, (void *)xchg); + return ret; +} + +static unsigned int unf_send_gs_cmnd_failed(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_fra_pkg) +{ + struct unf_xchg_s *xchg = NULL; + unsigned int ret = RETURN_OK; + unsigned short hot_pool_tag = 0; + unsigned long flags = 0; + + void (*pfn_ob_callback)(struct unf_xchg_s *) = NULL; + + UNF_CHECK_VALID(0x3555, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3556, UNF_TRUE, v_fra_pkg, return UNF_RETURN_ERROR); + + if (!v_lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) loopup exchange by tag function can't be NULL", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + hot_pool_tag = (unsigned short) + (v_fra_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]); + xchg = (struct unf_xchg_s *) + (v_lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag( + (void *)v_lport, + hot_pool_tag)); + + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) can't find 
exhange by tag(0x%x)", + v_lport->port_id, hot_pool_tag); + + return UNF_RETURN_ERROR; + } + + UNF_CHECK_ALLOCTIME_VALID( + v_lport, hot_pool_tag, xchg, + v_fra_pkg->private[PKG_PRIVATE_XCHG_ALLOC_TIME], + xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]); + + v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)xchg); + + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + if ((xchg->pfn_ob_callback) && + (!(xchg->io_state & TGT_IO_STATE_ABORT))) { + pfn_ob_callback = xchg->pfn_ob_callback; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + pfn_ob_callback(xchg); + } else { + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + } + + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + return ret; +} + +unsigned int unf_receive_gs_pkg(void *v_lport, + struct unf_frame_pkg_s *v_fra_pkg) +{ + struct unf_lport_s *lport = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3557, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3558, UNF_TRUE, v_fra_pkg, return UNF_RETURN_ERROR); + + lport = (struct unf_lport_s *)v_lport; + + if ((v_fra_pkg->type) == UNF_PKG_GS_REQ_DONE) { + if ((v_fra_pkg->status == UNF_IO_SUCCESS) || + (v_fra_pkg->status == UNF_IO_UNDER_FLOW) || + (v_fra_pkg->status == UNF_IO_OVER_FLOW)) + ret = unf_rcv_gs_cmnd_reply(lport, v_fra_pkg); + else + ret = unf_send_gs_cmnd_failed(lport, v_fra_pkg); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) exchange type(0x%x) mismatch", + lport->port_id, v_fra_pkg->type); + + return UNF_RETURN_ERROR; + } + + return ret; +} + +static void unf_handle_init_gid_acc(struct unf_gif_acc_pld_s *v_gid_acc_pld, + struct unf_lport_s *v_lport) +{ + /* + * from SCR ACC callback + * NOTE: inquiry disc R_Port used for NPIV + */ + struct unf_disc_rport_s *disc_rport = NULL; + struct unf_disc_s *disc = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int gid_port_id = 0; + unsigned int nport_id = 0; + unsigned int i = 0; + unsigned char control = 0; + + UNF_CHECK_VALID(0x3559, UNF_TRUE, v_gid_acc_pld, return); + UNF_CHECK_VALID(0x3560, UNF_TRUE, v_lport, return); + + /* + * 1. Find & Check & Get (new) R_Port from list_disc_rports_pool + * then, Add to R_Port Disc_busy_list + */ + while (i < UNF_GID_PORT_CNT) { + gid_port_id = (v_gid_acc_pld->gid_port_id[i]); + nport_id = UNF_NPORTID_MASK & gid_port_id; + control = UNF_GID_CONTROL(gid_port_id); + + /* for each N_Port_ID from GID_ACC payload */ + if ((nport_id != v_lport->nport_id) && (nport_id != 0) && + (!unf_lookup_lport_by_nport_id(v_lport, nport_id))) { + /* for New Port, not L_Port */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port(0x%x_0x%x) get nportid(0x%x) from GID_ACC", + v_lport->port_id, v_lport->nport_id, + nport_id); + + /* Get R_Port from list of RPort Disc Pool */ + disc_rport = + unf_rport_get_free_and_init(v_lport, + UNF_PORT_TYPE_DISC, + nport_id); + if (!disc_rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) can't allocate new rport(0x%x) from disc pool", + v_lport->port_id, + v_lport->nport_id, + nport_id); + + i++; + continue; + } + } + + if ((control & UNF_GID_LAST_PORT_ID) == UNF_GID_LAST_PORT_ID) + break; + + i++; + } + + /* + * 2. 
Do port disc stop operation: + * NOTE: Do DISC & release R_Port from + * busy_list back to list_disc_rports_pool + */ + disc = &v_lport->disc; + if (!disc->unf_disc_temp.pfn_unf_disc_stop) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) disc stop function is NULL", + v_lport->port_id, v_lport->nport_id); + + return; + } + + ret = disc->unf_disc_temp.pfn_unf_disc_stop(v_lport); + if (ret != RETURN_OK) + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) do disc stop failed", + v_lport->port_id, v_lport->nport_id); +} + +void unf_rport_immediate_linkdown(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* Swap case: Report Link Down immediately & release R_Port */ + unsigned long flags = 0; + struct unf_disc_s *disc = NULL; + + UNF_CHECK_VALID(0x3561, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3562, UNF_TRUE, v_rport, return); + + spin_lock_irqsave(&v_rport->rport_state_lock, flags); + /* 1. Inc R_Port ref_cnt */ + if (unf_rport_ref_inc(v_rport) != RETURN_OK) { + spin_unlock_irqrestore(&v_rport->rport_state_lock, flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) Rport(0x%p,0x%x) is removing and no need process", + v_lport->port_id, v_rport, v_rport->nport_id); + + return; + } + + /* 2. R_PORT state update: Link Down Event --->>> closing state */ + unf_rport_state_ma(v_rport, UNF_EVENT_RPORT_LINK_DOWN); + spin_unlock_irqrestore(&v_rport->rport_state_lock, flags); + + /* 3. Put R_Port from busy to destroy list */ + disc = &v_lport->disc; + spin_lock_irqsave(&disc->rport_busy_pool_lock, flags); + list_del_init(&v_rport->entry_rport); + list_add_tail(&v_rport->entry_rport, &disc->list_destroy_rports); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flags); + + /* 4. 
Schedule Closing work (Enqueuing workqueue) */ + unf_schedule_closing_work(v_lport, v_rport); + + unf_rport_ref_dec(v_rport); +} + +static unsigned int unf_rport_check_wwn(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* Send GPN_ID */ + struct unf_rport_s *sns_port = NULL; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3564, UNF_TRUE, v_lport, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3565, UNF_TRUE, v_rport, + return UNF_RETURN_ERROR); + + /* Get SNS R_Port */ + sns_port = unf_get_rport_by_nport_id(v_lport, + UNF_FC_FID_DIR_SERV); + if (!sns_port) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) can't find fabric Port", + v_lport->nport_id); + + return UNF_RETURN_ERROR; + } + + /* Send GPN_ID to SW */ + ret = unf_get_and_post_disc_event(v_lport, sns_port, v_rport->nport_id, + UNF_DISC_GET_PORT_NAME); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", + v_lport->nport_id, UNF_DISC_GET_PORT_NAME, + v_rport->nport_id); + + unf_rcv_gpn_id_rsp_unknown(v_lport, v_rport->nport_id); + } + + return ret; +} + +static unsigned int unf_handle_rscn_port_not_in_disc( + struct unf_lport_s *v_lport, + unsigned int v_rscn_nport_id) +{ + /* RSCN Port_ID not in GID_ACC payload table: Link Down */ + struct unf_rport_s *rport = NULL; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3566, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + /* from R_Port busy list by N_Port_ID */ + rport = unf_get_rport_by_nport_id(v_lport, v_rscn_nport_id); + if (rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_KEVENT, + "[info]Port(0x%x) RPort(0x%x) wwpn(0x%llx) has been removed and link down it", + v_lport->port_id, v_rscn_nport_id, + rport->port_name); + + unf_rport_linkdown(v_lport, rport); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) has no RPort(0x%x) and do nothing", + v_lport->nport_id, v_rscn_nport_id); + } + + return ret; +} + +static unsigned int unf_handle_rscn_port_in_disc(struct unf_lport_s *v_lport, + unsigned int v_rscn_nport_id) +{ + /* Send GPN_ID or re-login(GNN_ID) */ + struct unf_rport_s *rport = NULL; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3567, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + /* from R_Port busy list by N_Port_ID */ + rport = unf_get_rport_by_nport_id(v_lport, v_rscn_nport_id); + if (rport) { + /* R_Port exist: send GPN_ID */ + ret = unf_rport_check_wwn(v_lport, rport); + } else { + if ((v_lport->options & UNF_PORT_MODE_INI) == + UNF_PORT_MODE_INI) { + /* Re-LOGIN with INI mode: Send GNN_ID */ + ret = unf_rport_relogin(v_lport, v_rscn_nport_id); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port(0x%x) with no INI feature. Do nothing", + v_lport->nport_id); + } + } + + return ret; +} + +static unsigned int unf_handle_rscn_port_addr( + struct unf_port_id_page_s *v_port_id_page, + struct unf_gif_acc_pld_s *v_gid_acc_pld, + struct unf_lport_s *v_lport) +{ + /* + * Input parameters: + * 1. Port_ID_page: saved from RSCN payload + * 2. 
GID_ACC_payload: back from GID_ACC (GID_PT or GID_FT) + ** + * Do work: check whether RSCN Port_ID within GID_ACC payload or not + * then, re-login or link down rport + */ + unsigned int rscn_nport_id = 0; + unsigned int gid_port_id = 0; + unsigned int nport_id = 0; + unsigned int i = 0; + unsigned char control = 0; + unsigned int ret = RETURN_OK; + enum int_e have_same_id = UNF_FALSE; + + UNF_CHECK_VALID(0x3568, UNF_TRUE, v_port_id_page, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3569, UNF_TRUE, v_gid_acc_pld, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3570, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + /* 1. get RSCN_NPort_ID from (L_Port->Disc->RSCN_Mgr)->RSCN_Port_ID_Page */ + rscn_nport_id = UNF_SERVICE_GET_NPORTID_FORM_GID_PAGE(v_port_id_page); + + /* + * 2. for RSCN_NPort_ID + * check whether RSCN_NPort_ID within GID_ACC_Payload or not + */ + while (i < UNF_GID_PORT_CNT) { /* 4k */ + gid_port_id = (v_gid_acc_pld->gid_port_id[i]); + nport_id = UNF_NPORTID_MASK & gid_port_id; + control = UNF_GID_CONTROL(gid_port_id); + + if ((v_lport->nport_id != nport_id) && (nport_id != 0)) { + /* is not L_Port */ + if (rscn_nport_id == nport_id) { + /* RSCN Port_ID within GID_ACC payload */ + have_same_id = UNF_TRUE; + break; + } + } + + if ((control & UNF_GID_LAST_PORT_ID) == UNF_GID_LAST_PORT_ID) + break; + + i++; + } + + /* 3. RSCN_Port_ID not within GID_ACC payload table */ + if (have_same_id == UNF_FALSE) { + /* rport has been removed */ + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[warn]Port(0x%x_0x%x) find RSCN N_Port_ID(0x%x) in GID_ACC table failed", + v_lport->port_id, v_lport->nport_id, + rscn_nport_id); + + /* Link down rport */ + ret = unf_handle_rscn_port_not_in_disc(v_lport, + rscn_nport_id); + } else { /* 4. 
RSCN_Port_ID within GID_ACC payload table */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x_0x%x) find RSCN N_Port_ID(0x%x) in GID_ACC table succeed", + v_lport->port_id, v_lport->nport_id, + rscn_nport_id); + + /* Re-login with INI mode */ + ret = unf_handle_rscn_port_in_disc(v_lport, rscn_nport_id); + } + + return ret; +} + +static void unf_check_rport_rscn_process( + struct unf_rport_s *v_rport, + struct unf_port_id_page_s *v_port_id_page) +{ + struct unf_rport_s *rport = v_rport; + struct unf_port_id_page_s *port_id_page = v_port_id_page; + unsigned char format = port_id_page->uc_addr_format; + + switch (format) { + /* domain+area */ + case UNF_RSCN_AREA_ADDR_GROUP: + if (UNF_GET_DOMAIN_ID(rport->nport_id) == + port_id_page->port_id_domain && + UNF_GET_AREA_ID(rport->nport_id) == + port_id_page->port_id_area) { + rport->rscn_position = UNF_RPORT_NEED_PROCESS; + } + break; + /* domain */ + case UNF_RSCN_DOMAIN_ADDR_GROUP: + if (UNF_GET_DOMAIN_ID(rport->nport_id) == + port_id_page->port_id_domain) + rport->rscn_position = UNF_RPORT_NEED_PROCESS; + break; + /* all */ + case UNF_RSCN_FABRIC_ADDR_GROUP: + rport->rscn_position = UNF_RPORT_NEED_PROCESS; + break; + default: + break; + } +} + +static void unf_set_rport_rscn_position( + struct unf_lport_s *v_lport, + struct unf_port_id_page_s *v_port_id_page) +{ + struct unf_rport_s *rport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + struct unf_disc_s *disc = NULL; + unsigned long disc_flag = 0; + unsigned long rport_flag = 0; + + UNF_CHECK_VALID(0x3571, UNF_TRUE, v_lport, return); + disc = &v_lport->disc; + + spin_lock_irqsave(&disc->rport_busy_pool_lock, disc_flag); + list_for_each_safe(node, next_node, &disc->list_busy_rports) { + rport = list_entry(node, struct unf_rport_s, entry_rport); + spin_lock_irqsave(&rport->rport_state_lock, rport_flag); + + if (rport->nport_id < UNF_FC_FID_DOM_MGR) { + if (rport->rscn_position == UNF_RPORT_NOT_NEED_PROCESS) + unf_check_rport_rscn_process(rport, + v_port_id_page); + } else { + rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; + } + + spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); + } + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag); +} + +static void unf_set_rport_rscn_position_local(struct unf_lport_s *v_lport) +{ + struct unf_rport_s *rport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + struct unf_disc_s *disc = NULL; + unsigned long disc_flag = 0; + unsigned long rport_flag = 0; + + UNF_CHECK_VALID(0x3572, UNF_TRUE, v_lport, return); + disc = &v_lport->disc; + + spin_lock_irqsave(&disc->rport_busy_pool_lock, disc_flag); + list_for_each_safe(node, next_node, &disc->list_busy_rports) { + rport = list_entry(node, struct unf_rport_s, entry_rport); + spin_lock_irqsave(&rport->rport_state_lock, rport_flag); + + if (rport->nport_id < UNF_FC_FID_DOM_MGR) { + if (rport->rscn_position == UNF_RPORT_NEED_PROCESS) + rport->rscn_position = + UNF_RPORT_ONLY_IN_LOCAL_PROCESS; + } else { + rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; + } + + spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); + } + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag); +} + +static void unf_reset_rport_rscn_setting(struct unf_lport_s *v_lport) +{ + struct unf_rport_s *rport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + struct unf_disc_s *disc = NULL; + unsigned long rport_flag = 0; + + UNF_CHECK_VALID(0x3573, UNF_TRUE, v_lport, 
return); + disc = &v_lport->disc; + + list_for_each_safe(node, next_node, &disc->list_busy_rports) { + rport = list_entry(node, struct unf_rport_s, entry_rport); + spin_lock_irqsave(&rport->rport_state_lock, rport_flag); + rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; + spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); + } +} + +static void unf_compare_nport_id_with_rport_list( + struct unf_lport_s *v_lport, + unsigned int v_nport_id, + struct unf_port_id_page_s *v_port_id_page) +{ + struct unf_rport_s *rport = NULL; + unsigned long rport_flag = 0; + unsigned char format = v_port_id_page->uc_addr_format; + + UNF_CHECK_VALID(0x3574, UNF_TRUE, v_lport, return); + + switch (format) { + /* domain+area */ + case UNF_RSCN_AREA_ADDR_GROUP: + if ((UNF_GET_DOMAIN_ID(v_nport_id) != + v_port_id_page->port_id_domain) || + (UNF_GET_AREA_ID(v_nport_id) != + v_port_id_page->port_id_area)) + return; + break; + /* domain */ + case UNF_RSCN_DOMAIN_ADDR_GROUP: + if (UNF_GET_DOMAIN_ID(v_nport_id) != + v_port_id_page->port_id_domain) + return; + break; + /* all */ + case UNF_RSCN_FABRIC_ADDR_GROUP: + break; + /* can't enter this branch guarantee by outer */ + default: + break; + } + + rport = unf_get_rport_by_nport_id(v_lport, v_nport_id); + + if (!rport) { + if ((v_lport->options & UNF_PORT_MODE_INI) == + UNF_PORT_MODE_INI) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_KEVENT, + "[event]Port(0x%x) Find Rport(0x%x) by RSCN", + v_lport->nport_id, v_nport_id); + unf_rport_relogin(v_lport, v_nport_id); + } + } else { + spin_lock_irqsave(&rport->rport_state_lock, rport_flag); + if (rport->rscn_position == UNF_RPORT_NEED_PROCESS) + rport->rscn_position = + UNF_RPORT_IN_DISC_AND_LOCAL_PROCESS; + + spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); + } +} + +static void unf_compare_disc_with_local_rport( + struct unf_lport_s *v_lport, + struct unf_gif_acc_pld_s *v_gid_acc_pld, + struct unf_port_id_page_s *v_port_id_page) +{ + unsigned int gid_port_id = 0; + unsigned int nport_id = 0; + unsigned int i = 0; + unsigned char control = 0; + + UNF_CHECK_VALID(0x3575, UNF_TRUE, v_gid_acc_pld, return); + UNF_CHECK_VALID(0x3576, UNF_TRUE, v_lport, return); + + while (i < UNF_GID_PORT_CNT) { + gid_port_id = (v_gid_acc_pld->gid_port_id[i]); + nport_id = UNF_NPORTID_MASK & gid_port_id; + control = UNF_GID_CONTROL(gid_port_id); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) DISC N_Port_ID(0x%x)", + v_lport->nport_id, nport_id); + + if ((nport_id != 0) && + (!unf_lookup_lport_by_nport_id(v_lport, nport_id))) + unf_compare_nport_id_with_rport_list(v_lport, nport_id, + v_port_id_page); + + if ((UNF_GID_LAST_PORT_ID & control) == UNF_GID_LAST_PORT_ID) + break; + + i++; + } + + unf_set_rport_rscn_position_local(v_lport); +} + +static unsigned int unf_process_each_rport_after_rscn( + struct unf_lport_s *v_lport, + struct unf_rport_s *v_sns_port, + struct unf_rport_s *v_rport) +{ + unsigned long rport_flag = 0; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3577, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3578, UNF_TRUE, v_sns_port, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3579, UNF_TRUE, v_sns_port, return UNF_RETURN_ERROR); + + UNF_REFERNCE_VAR(v_sns_port); + + spin_lock_irqsave(&v_rport->rport_state_lock, rport_flag); + + if (v_rport->rscn_position == UNF_RPORT_IN_DISC_AND_LOCAL_PROCESS) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_KEVENT, + "[info]Port(0x%x_0x%x) RPort(0x%x) rescan position(0x%x), 
check wwpn", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, v_rport->rscn_position); + v_rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; + spin_unlock_irqrestore(&v_rport->rport_state_lock, rport_flag); + ret = unf_rport_check_wwn(v_lport, v_rport); + } else if (v_rport->rscn_position == + UNF_RPORT_ONLY_IN_LOCAL_PROCESS) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_KEVENT, + "[event]Port(0x%x_0x%x) RPort(0x%x) rescan position(0x%x), linkdown it", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, v_rport->rscn_position); + v_rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; + spin_unlock_irqrestore(&v_rport->rport_state_lock, rport_flag); + unf_rport_linkdown(v_lport, v_rport); + } else { + spin_unlock_irqrestore(&v_rport->rport_state_lock, rport_flag); + } + + return ret; +} + +static unsigned int unf_process_local_rport_after_rscn( + struct unf_lport_s *v_lport, + struct unf_rport_s *v_sns_port) +{ + struct unf_rport_s *rport = NULL; + struct list_head *node = NULL; + struct unf_disc_s *disc = NULL; + unsigned long disc_flag = 0; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3580, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3581, UNF_TRUE, v_sns_port, return UNF_RETURN_ERROR); + disc = &v_lport->disc; + + spin_lock_irqsave(&disc->rport_busy_pool_lock, disc_flag); + if (list_empty(&disc->list_busy_rports)) { + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag); + + return UNF_RETURN_ERROR; + } + + node = (&disc->list_busy_rports)->next; + + do { + rport = list_entry(node, struct unf_rport_s, entry_rport); + + if (rport->rscn_position == UNF_RPORT_NOT_NEED_PROCESS) { + node = node->next; + continue; + } else { + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, + disc_flag); + ret = unf_process_each_rport_after_rscn(v_lport, + v_sns_port, + rport); + spin_lock_irqsave(&disc->rport_busy_pool_lock, + disc_flag); + node = (&disc->list_busy_rports)->next; + } + } while (node != &disc->list_busy_rports); + + unf_reset_rport_rscn_setting(v_lport); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag); + + return ret; +} + +static unsigned int unf_handle_rscn_group_addr( + struct unf_port_id_page_s *v_port_id_page, + struct unf_gif_acc_pld_s *v_gid_acc_pld, + struct unf_lport_s *v_lport) +{ + struct unf_rport_s *sns_port = NULL; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3582, UNF_TRUE, v_port_id_page, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3583, UNF_TRUE, v_gid_acc_pld, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3584, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + UNF_REFERNCE_VAR(v_port_id_page); + + sns_port = unf_get_rport_by_nport_id(v_lport, UNF_FC_FID_DIR_SERV); + if (!sns_port) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) find fabric port failed", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + unf_set_rport_rscn_position(v_lport, v_port_id_page); + unf_compare_disc_with_local_rport(v_lport, v_gid_acc_pld, + v_port_id_page); + + ret = unf_process_local_rport_after_rscn(v_lport, sns_port); + return ret; +} + +static void unf_handle_rscn_gid_acc(struct unf_gif_acc_pld_s *v_gid_acc_pld, + struct unf_lport_s *v_lport) +{ + /* for N_Port_ID table return from RSCN */ + struct unf_port_id_page_s *port_id_page = NULL; + struct unf_rscn_mg_s *rscn_mgr = NULL; + struct list_head *list_node = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3585, UNF_TRUE, v_gid_acc_pld, return); + UNF_CHECK_VALID(0x3586, UNF_TRUE, 
v_lport, return); + rscn_mgr = &v_lport->disc.rscn_mgr; + + spin_lock_irqsave(&rscn_mgr->rscn_id_list_lock, flag); + while (!list_empty(&rscn_mgr->list_using_rscn_page)) { + /* + * for each RSCN_Using_Page(NPortID) + * for each L_Port->Disc->RSCN_Mgr-> + * RSCN_Using_Page(Port_ID_Page) + * NOTE: + * check using_page_port_id whether within + * GID_ACC payload or not + */ + list_node = (&rscn_mgr->list_using_rscn_page)->next; + port_id_page = list_entry(list_node, struct unf_port_id_page_s, + list_node_rscn); + /* NOTE: here delete node (from RSCN using Page) */ + list_del(list_node); + spin_unlock_irqrestore(&rscn_mgr->rscn_id_list_lock, flag); + + switch (port_id_page->uc_addr_format) { + /* each page of RSNC corresponding one of N_Port_ID */ + case UNF_RSCN_PORT_ADDR: + (void)unf_handle_rscn_port_addr(port_id_page, + v_gid_acc_pld, + v_lport); + break; + + /* each page of RSNC corresponding address group */ + case UNF_RSCN_AREA_ADDR_GROUP: + case UNF_RSCN_DOMAIN_ADDR_GROUP: + case UNF_RSCN_FABRIC_ADDR_GROUP: + (void)unf_handle_rscn_group_addr(port_id_page, + v_gid_acc_pld, + v_lport); + break; + + default: + break; + } + + /* NOTE: release this RSCN_Node */ + rscn_mgr->pfn_unf_release_rscn_node(rscn_mgr, port_id_page); + + /* go to next */ + spin_lock_irqsave(&rscn_mgr->rscn_id_list_lock, flag); + } + spin_unlock_irqrestore(&rscn_mgr->rscn_id_list_lock, flag); +} + +static void unf_gid_acc_handle(struct unf_gif_acc_pld_s *v_gid_acc_pld, + struct unf_lport_s *v_lport) +{ +#define UNF_NONE_DISC 0X0 /* before enter DISC */ + + struct unf_disc_s *disc = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3587, UNF_TRUE, v_gid_acc_pld, return); + UNF_CHECK_VALID(0x3588, UNF_TRUE, v_lport, return); + disc = &v_lport->disc; + + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + switch (disc->disc_option) { + case UNF_INIT_DISC: // from SCR callback with INI mode + disc->disc_option = UNF_NONE_DISC; + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + /* R_Port from Disc_list */ + unf_handle_init_gid_acc(v_gid_acc_pld, v_lport); + break; + + case UNF_RSCN_DISC: /* from RSCN payload parse(analysis) */ + disc->disc_option = UNF_NONE_DISC; + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + /* R_Port from busy_list */ + unf_handle_rscn_gid_acc(v_gid_acc_pld, v_lport); + break; + + default: + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x)'s disc option(0x%x) is abnormal", + v_lport->port_id, + v_lport->nport_id, + disc->disc_option); + break; + } +} + +static void unf_gid_ft_callback(void *v_lport, void *v_rport, void *v_xchg) +{ + struct unf_lport_s *lport = NULL; + struct unf_disc_s *disc = NULL; + struct unf_gif_acc_pld_s *gid_acc_pld = NULL; + struct unf_xchg_s *xchg = NULL; + union unf_sfs_u *sfs_ptr = NULL; + unsigned int cmnd_rsp_size = 0; + unsigned int rjt_reason = 0; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3590, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3591, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3592, UNF_TRUE, v_xchg, return); + UNF_REFERNCE_VAR(v_rport); + + lport = (struct unf_lport_s *)v_lport; + xchg = (struct unf_xchg_s *)v_xchg; + disc = &lport->disc; + + sfs_ptr = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + gid_acc_pld = sfs_ptr->get_id.gid_rsp.gid_acc_pld; + if (!gid_acc_pld) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) GID_FT response payload is NULL", + lport->port_id); 
+ + return; + } + + cmnd_rsp_size = (gid_acc_pld->ctiu_pream.cmnd_rsp_size); + if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_ACCEPT) { + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + unf_disc_state_ma(lport, UNF_EVENT_DISC_SUCCESS); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + /* Process GID_FT ACC */ + unf_gid_acc_handle(gid_acc_pld, lport); + } else if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_REJECT) { + rjt_reason = (gid_acc_pld->ctiu_pream.frag_reason_exp_vend); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) GID_FT was rejected with reason code(0x%x)", + lport->port_id, rjt_reason); + + if ((rjt_reason & UNF_CTIU_RJT_EXP_MASK) == + UNF_CTIU_RJT_EXP_FC4TYPE_NO_REG) { + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + unf_disc_state_ma(lport, UNF_EVENT_DISC_SUCCESS); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, + flag); + + unf_gid_acc_handle(gid_acc_pld, lport); + } else { + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + unf_disc_state_ma(lport, UNF_EVENT_DISC_SUCCESS); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, + flag); + } + } else { + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + unf_disc_state_ma(lport, UNF_EVENT_DISC_FAILED); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + /* Do DISC recovery operation */ + unf_disc_error_recovery(lport); + } +} + +static void unf_gid_pt_callback(void *v_lport, void *v_rport, void *v_xchg) +{ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_disc_s *disc = NULL; + struct unf_gif_acc_pld_s *gid_acc_pld = NULL; + struct unf_xchg_s *xchg = NULL; + union unf_sfs_u *sfs_ptr = NULL; + unsigned int cmnd_rsp_size = 0; + unsigned int rjt_reason = 0; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3594, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3595, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3596, UNF_TRUE, v_xchg, return); + + lport = (struct unf_lport_s *)v_lport; + rport = (struct unf_rport_s *)v_rport; + disc = &lport->disc; + xchg = (struct unf_xchg_s *)v_xchg; + sfs_ptr = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + + gid_acc_pld = sfs_ptr->get_id.gid_rsp.gid_acc_pld; + if (!gid_acc_pld) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) GID_PT response payload is NULL", + lport->port_id); + + return; + } + + cmnd_rsp_size = (gid_acc_pld->ctiu_pream.cmnd_rsp_size); + if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_ACCEPT) { + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + unf_disc_state_ma(lport, UNF_EVENT_DISC_SUCCESS); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + unf_gid_acc_handle(gid_acc_pld, lport); + } else if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_REJECT) { + rjt_reason = (gid_acc_pld->ctiu_pream.frag_reason_exp_vend); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) GID_PT was rejected with reason code(0x%x)", + lport->port_id, lport->nport_id, rjt_reason); + + if (UNF_CTIU_RJT_EXP_PORTTYPE_NO_REG == + (rjt_reason & UNF_CTIU_RJT_EXP_MASK)) { + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + unf_disc_state_ma(lport, UNF_EVENT_DISC_SUCCESS); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, + flag); + + unf_gid_acc_handle(gid_acc_pld, lport); + } else { + ret = unf_send_gid_ft(lport, rport); + if (ret != RETURN_OK) { + 
spin_lock_irqsave(&disc->rport_busy_pool_lock, + flag); + unf_disc_state_ma(lport, UNF_EVENT_DISC_FAILED); + spin_unlock_irqrestore( + &disc->rport_busy_pool_lock, flag); + + /* Do DISC recovery */ + unf_disc_error_recovery(lport); + } + } + } else { + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + unf_disc_state_ma(lport, UNF_EVENT_DISC_FAILED); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + /* Do DISC recovery */ + unf_disc_error_recovery(lport); + } +} + +void unf_rcv_gnn_id_rsp_unknown(struct unf_lport_s *v_lport, + struct unf_rport_s *v_sns_port, + unsigned int v_nport_id) +{ + /* Send GFF_ID */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *sns_port = v_sns_port; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3606, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3607, UNF_TRUE, v_sns_port, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) Rportid(0x%x) GNN_ID response is unknown. Sending GFF_ID", + lport->port_id, lport->nport_id, v_nport_id); + + ret = unf_get_and_post_disc_event(lport, sns_port, v_nport_id, + UNF_DISC_GET_FEATURE); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", + lport->port_id, UNF_DISC_GET_FEATURE, v_nport_id); + + /* NOTE: go to next stage */ + unf_rcv_gff_id_rsp_unknown(lport, v_nport_id); // send PLOGI + } +} + +void unf_rcv_gff_id_rsp_unknown(struct unf_lport_s *v_lport, + unsigned int v_nport_id) +{ + /* Send PLOGI */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = NULL; + unsigned long flag = 0; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3624, UNF_TRUE, v_lport, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) send GFF_ID for RPort(0x%x) but response is unknown", + lport->port_id, v_nport_id); + + /* Get (Safe) R_Port & Set State */ + rport = unf_get_rport_by_nport_id(lport, v_nport_id); + if (rport) + rport = unf_find_rport(lport, v_nport_id, rport->port_name); + + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) can't get RPort by NPort ID(0x%x), allocate new RPort", + lport->port_id, lport->nport_id, v_nport_id); + + rport = unf_rport_get_free_and_init(lport, UNF_PORT_TYPE_FC, + v_nport_id); + UNF_CHECK_VALID(0x3619, UNF_TRUE, NULL != rport, return); + + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = v_nport_id; + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + } + + rport = unf_get_safe_rport(lport, rport, UNF_RPORT_REUSE_ONLY, + v_nport_id); + UNF_CHECK_VALID(0x3625, UNF_TRUE, rport, return); + + /* Update R_Port state: PLOGI_WAIT */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = v_nport_id; + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Start to send PLOGI */ + ret = unf_send_plogi(lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) can not send PLOGI for RPort(0x%x), enter recovery", + lport->port_id, v_nport_id); + + unf_rport_error_recovery(rport); + } +} + +static void unf_lport_update_nport_id(struct unf_lport_s *v_lport, + unsigned int v_nport_id) +{ + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3646, UNF_TRUE, v_lport, return); + + 
spin_lock_irqsave(&v_lport->lport_state_lock, flag); + v_lport->nport_id = v_nport_id; + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); +} + +static void unf_lport_update_time_params( + struct unf_lport_s *v_lport, + struct unf_flogi_payload_s *v_flogi_payload) +{ + unsigned long flag = 0; + unsigned int ed_tov = 0; + unsigned int ra_tov = 0; + + UNF_CHECK_VALID(0x3647, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3648, UNF_TRUE, v_flogi_payload, return); + + ed_tov = v_flogi_payload->fabric_parms.co_parms.e_d_tov; + ra_tov = v_flogi_payload->fabric_parms.co_parms.r_a_tov; + + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + + /* FC-FS-3: 21.3.4, 21.3.5 */ + if ((v_lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC) || + (v_lport->en_act_topo == UNF_ACT_TOP_PUBLIC_LOOP)) { + v_lport->ed_tov = ed_tov; + v_lport->ra_tov = ra_tov; + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, + UNF_MAJOR, + "[info]Port(0x%x_0x%x) with topo(0x%x) no need to save time parameters", + v_lport->port_id, v_lport->nport_id, + v_lport->en_act_topo); + } + + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); +} + +static void unf_fdisc_callback(void *v_lport, void *v_rport, void *v_xchg) +{ + /* Register to Name Server or Do recovery */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_flogi_payload_s *fdisc_pld = NULL; + unsigned long flag = 0; + unsigned int cmd = 0; + + lport = (struct unf_lport_s *)v_lport; + rport = (struct unf_rport_s *)v_rport; + xchg = (struct unf_xchg_s *)v_xchg; + UNF_CHECK_VALID(0x3640, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3641, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3642, UNF_TRUE, v_xchg, return); + UNF_CHECK_VALID(0x3643, UNF_TRUE, + xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr, + return); + fdisc_pld = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->fdisc_acc.fdisc_payload; + if (xchg->byte_orders & UNF_BIT_2) + unf_big_end_to_cpu((unsigned char *)fdisc_pld, + sizeof(struct unf_flogi_payload_s)); + + cmd = fdisc_pld->cmnd; + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: FDISC response is (0x%x). Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", + cmd, lport->port_id, rport->nport_id, xchg->ox_id); + rport = unf_get_rport_by_nport_id(lport, UNF_FC_FID_FLOGI); + rport = unf_get_safe_rport(lport, rport, UNF_RPORT_REUSE_ONLY, + UNF_FC_FID_FLOGI); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) has no Rport", lport->port_id); + return; + } + + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = UNF_FC_FID_FLOGI; + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + if ((cmd & UNF_ELS_CMND_HIGH_MASK) == UNF_ELS_CMND_ACC) { + /* Case for ACC */ + spin_lock_irqsave(&lport->lport_state_lock, flag); + if (lport->en_states != UNF_LPORT_ST_FLOGI_WAIT) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x_0x%x) receive Flogi/Fdisc ACC in state(0x%x)", + lport->port_id, lport->nport_id, + lport->en_states); + + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + return; + } + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + unf_lport_update_nport_id(lport, xchg->sid); + unf_lport_update_time_params(lport, fdisc_pld); + + unf_register_to_switch(lport); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: FDISC response is (0x%x). 
Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", + cmd, lport->port_id, rport->nport_id, + xchg->ox_id); + + /* Case for RJT: Do L_Port recovery */ + unf_lport_error_recovery(lport); + } +} + +static void unf_rcv_flogi_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_flogi_payload_s *v_flogi_pld, + unsigned int v_nport_id, + struct unf_xchg_s *v_xchg) +{ + /* PLOGI to Name server or remote port */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = v_rport; + struct unf_flogi_payload_s *flogi_pld = v_flogi_pld; + struct unf_fabric_parms_s *fabric_params = NULL; + unsigned long long port_name = 0; + unsigned long long node_name = 0; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3649, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3650, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3651, UNF_TRUE, v_flogi_pld, return); + + /* Check L_Port state: FLOGI_WAIT */ + spin_lock_irqsave(&lport->lport_state_lock, flag); + if (lport->en_states != UNF_LPORT_ST_FLOGI_WAIT) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[info]Port(0x%x_0x%x) receive FLOGI ACC with state(0x%x)", + lport->port_id, lport->nport_id, lport->en_states); + + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + return; + } + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + fabric_params = &flogi_pld->fabric_parms; + node_name = (unsigned long long) + (((unsigned long long)(fabric_params->high_node_name) << 32) | + ((unsigned long long)(fabric_params->low_node_name))); + port_name = (unsigned long long) + (((unsigned long long)(fabric_params->high_port_name) << 32) | + ((unsigned long long)(fabric_params->low_port_name))); + + /* flogi acc pyload class 3 service priority value */ + lport->b_priority = UNF_PRIORITY_DISABLE; + + /* Save Flogi parameters */ + unf_save_fabric_params(lport, rport, fabric_params); + + if (UNF_CHECK_NPORT_FPORT_BIT(flogi_pld) == UNF_N_PORT) { + /* P2P Mode */ + unf_lport_update_topo(lport, UNF_ACT_TOP_P2P_DIRECT); + unf_login_with_rport_in_n2n(lport, port_name, node_name); + } else { + /* for: UNF_ACT_TOP_PUBLIC_LOOP + * /UNF_ACT_TOP_P2P_FABRIC/UNF_TOP_P2P_MASK + */ + if (lport->en_act_topo != UNF_ACT_TOP_PUBLIC_LOOP) + unf_lport_update_topo(lport, UNF_ACT_TOP_P2P_FABRIC); + + unf_lport_update_nport_id(lport, v_nport_id); + unf_lport_update_time_params(lport, flogi_pld); + + /* Save process both for Public loop & Fabric */ + unf_register_to_switch(lport); + } +} + +static void unf_flogi_acc_com_process(struct unf_xchg_s *v_xchg) +{ + /* Maybe within interrupt or thread context */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_flogi_payload_s *flogi_pld = NULL; + unsigned int nport_id = 0; + unsigned int cmnd = 0; + unsigned long flags = 0; + struct unf_xchg_s *xchg = v_xchg; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, xchg, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, xchg->lport, return); + + lport = xchg->lport; + rport = xchg->rport; + flogi_pld = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->flogi_acc.flogi_payload; + cmnd = flogi_pld->cmnd; + + /* Get N_Port_ID & R_Port */ + /* Others: 0xFFFFFE */ + rport = unf_get_rport_by_nport_id(lport, UNF_FC_FID_FLOGI); + nport_id = UNF_FC_FID_FLOGI; + + /* Get Safe R_Port: reuse only */ + rport = unf_get_safe_rport(lport, rport, UNF_RPORT_REUSE_ONLY, + nport_id); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) can not allocate new Rport", + lport->port_id); + + return; + 
} + + /* Update R_Port N_Port_ID */ + spin_lock_irqsave(&rport->rport_state_lock, flags); + /* Others: 0xFFFFFE */ + rport->nport_id = UNF_FC_FID_FLOGI; + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + /* Process FLOGI ACC or RJT */ + if ((cmnd & UNF_ELS_CMND_HIGH_MASK) == UNF_ELS_CMND_ACC) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: FLOGI response is(0x%x). Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", + cmnd, lport->port_id, rport->nport_id, + xchg->ox_id); + + /* Case for ACC */ + unf_rcv_flogi_acc(lport, rport, flogi_pld, xchg->sid, xchg); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: FLOGI response is(0x%x). Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", + cmnd, lport->port_id, rport->nport_id, + xchg->ox_id); + + /* Case for RJT: do L_Port error recovery */ + unf_lport_error_recovery(lport); + } +} + +static int unf_rcv_flogi_acc_async_callback(void *v_arg_in, + void *v_arg_out) +{ + struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_arg_in; + + UNF_CHECK_VALID(0x2267, UNF_TRUE, xchg, return UNF_RETURN_ERROR); + + unf_flogi_acc_com_process(xchg); + + unf_xchg_ref_dec(xchg, SFS_RESPONSE); + return RETURN_OK; +} + +static void unf_flogi_callback(void *v_lport, void *v_rport, void *v_xchg) +{ + /* Callback function for FLOGI ACC or RJT */ + struct unf_lport_s *lport = (struct unf_lport_s *)v_lport; + struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_xchg; + struct unf_flogi_payload_s *flogi_pld = NULL; + int bbscn_enabled = UNF_FALSE; + enum unf_act_topo_e act_topo = UNF_ACT_TOP_UNKNOWN; + int switch_2_thread = UNF_FALSE; + + UNF_CHECK_VALID(0x3652, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3653, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3654, UNF_TRUE, v_xchg, return); + UNF_CHECK_VALID(0x3655, UNF_TRUE, + xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr, + return); + + xchg->lport = v_lport; + flogi_pld = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->flogi_acc.flogi_payload; + + if (xchg->byte_orders & UNF_BIT_2) + unf_big_end_to_cpu((unsigned char *)flogi_pld, + sizeof(struct unf_flogi_payload_s)); + + if ((lport->en_act_topo != UNF_ACT_TOP_PUBLIC_LOOP) && + (UNF_CHECK_NPORT_FPORT_BIT(flogi_pld) == UNF_F_PORT)) + /* Get Top Mode (P2P_F) --->>> used for BBSCN */ + act_topo = UNF_ACT_TOP_P2P_FABRIC; + + bbscn_enabled = unf_check_bbscn_is_enabled( + (unsigned char) + lport->low_level_func.lport_cfg_items.bb_scn, + (unsigned char) + UNF_GET_BB_SC_N_FROM_PARAMS(&flogi_pld->fabric_parms)); + if ((act_topo == UNF_ACT_TOP_P2P_FABRIC) && + (bbscn_enabled == UNF_TRUE)) { + /* BBSCN Enable or not --->>> used for Context change */ + lport->b_bbscn_support = UNF_TRUE; + switch_2_thread = UNF_TRUE; + } + + if ((switch_2_thread == UNF_TRUE) && (lport->root_lport == lport)) { + /* Wait for LR done sync: for Root Port */ + (void)unf_irq_process_switch_2_thread( + lport, xchg, + unf_rcv_flogi_acc_async_callback); + } else { + /* Process FLOGI response directly */ + unf_flogi_acc_com_process(xchg); + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_ALL, + "[info]Port(0x%x) process FLOGI response: switch(%d) to thread done", + lport->port_id, switch_2_thread); +} + +struct unf_rport_s *unf_find_rport(struct unf_lport_s *v_lport, + unsigned int v_rport_nport_id, + unsigned long long v_port_name) +{ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = NULL; + + UNF_CHECK_VALID(0x3658, UNF_TRUE, v_lport, return NULL); + + if (v_rport_nport_id >= UNF_FC_FID_DOM_MGR) // 
N_Port_ID <---> SID + /* R_Port is Fabric: by N_Port_ID */ + rport = unf_get_rport_by_nport_id(lport, v_rport_nport_id); + else + /* Others: by WWPN & N_Port_ID */ + rport = unf_find_valid_rport(lport, v_port_name, + v_rport_nport_id); + + return rport; +} + +static void unf_rcv_plogi_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_lgn_parms_s *v_login_parms) +{ + /* PLOGI ACC: PRLI(non fabric) or RFT_ID(fabric) */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = v_rport; + struct unf_lgn_parms_s *login_parms = v_login_parms; + unsigned long long node_name = 0; + unsigned long long port_name = 0; + unsigned long flag = 0; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3659, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3660, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3661, UNF_TRUE, v_login_parms, return); + + node_name = (unsigned long long) + (((unsigned long long)(login_parms->high_node_name) << 32) | + ((unsigned long long)(login_parms->low_node_name))); + port_name = (unsigned long long) + (((unsigned long long)(login_parms->high_port_name) << 32) | + ((unsigned long long)(login_parms->low_port_name))); + + /* ACC & Case for: R_Port is fabric (RFT_ID) */ + if (rport->nport_id >= UNF_FC_FID_DOM_MGR) { + /* Check L_Port state */ + spin_lock_irqsave(&lport->lport_state_lock, flag); + if (lport->en_states != UNF_LPORT_ST_PLOGI_WAIT) { + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) receive PLOGI ACC with error state(0x%x)", + v_lport->port_id, lport->en_states); + + return; + } + /* PLOGI_WAIT --> RFT_ID_WAIT */ + unf_lport_stat_ma(lport, UNF_EVENT_LPORT_REMOTE_ACC); + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + /* PLOGI parameters save */ + unf_save_plogi_params(lport, rport, login_parms, ELS_ACC); + + /* Update R_Port WWPN & WWNN */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->node_name = node_name; + rport->port_name = port_name; + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Start to Send RFT_ID */ + ret = unf_send_rft_id(lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]LOGIN: Port(0x%x) send RFT_ID failed", + v_lport->port_id); + + unf_lport_error_recovery(lport); + } + } else { + /* ACC & Case for: R_Port is not fabric */ + if ((rport->options == UNF_PORT_MODE_UNKNOWN) && + (rport->port_name != INVALID_WWPN)) + rport->options = unf_get_port_feature(port_name); + /* Set Port Feature with BOTH: cancel */ + + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->node_name = node_name; + rport->port_name = port_name; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]LOGIN: Port(0x%x)<---LS_ACC(DID:0x%x SID:0x%x) for PLOGI ACC with RPort state(0x%x) NodeName(0x%llx) E_D_TOV(%d)", + lport->port_id, lport->nport_id, + rport->nport_id, rport->rp_state, + rport->node_name, rport->ed_tov); + + if ((lport->en_act_topo == UNF_ACT_TOP_PRIVATE_LOOP) && + ((rport->rp_state == UNF_RPORT_ST_PRLI_WAIT) || + (rport->rp_state == UNF_RPORT_ST_READY))) { + /* Do nothing, return directly */ + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + return; + } + + /* PRLI_WAIT */ + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PRLI); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* PLOGI parameters save */ + unf_save_plogi_params(lport, rport, login_parms, ELS_ACC); + + /* + * 
Need Delay to Send PRLI or not + * Used for: L_Port with INI mode & R_Port is not Fabric + */ + unf_check_rport_need_delay_prli(lport, rport, + rport->options); + + /* Do not care: Just used for L_Port only is + * TGT mode or R_Port only is INI mode + */ + unf_schedule_open_work(lport, rport); + } +} + +static void unf_plogi_acc_com_process(struct unf_xchg_s *v_xchg) +{ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_xchg; + struct unf_plogi_payload_s *plogi_pld = NULL; + struct unf_lgn_parms_s *login_parms = NULL; + unsigned long flag = 0; + unsigned long long port_name = 0; + unsigned int rport_nport_id = 0; + unsigned int cmnd = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, xchg, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, xchg->lport, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, xchg->rport, return); + + lport = xchg->lport; + rport = xchg->rport; + rport_nport_id = rport->nport_id; + plogi_pld = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->plogi_acc.payload; + login_parms = &plogi_pld->parms; + cmnd = (plogi_pld->cmnd); + + if ((cmnd & UNF_ELS_CMND_HIGH_MASK) == UNF_ELS_CMND_ACC) { + /* Case for PLOGI ACC: Go to next stage */ + port_name = (unsigned long long) + (((unsigned long long)(login_parms->high_port_name) << 32) | + ((unsigned long long)(login_parms->low_port_name))); + + /* Get (new) R_Port: 0xfffffc has same WWN with 0xfffcxx */ + rport = unf_find_rport(lport, rport_nport_id, port_name); + rport = unf_get_safe_rport(lport, rport, UNF_RPORT_REUSE_ONLY, + rport_nport_id); + if (unlikely(!rport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x_0x%x) alloc new RPort with wwpn(0x%llx) failed", + lport->port_id, lport->nport_id, + port_name); + return; + } + + /* PLOGI parameters check */ + ret = unf_check_plogi_params(lport, rport, login_parms); + if (ret != RETURN_OK) + return; + + /* Update R_Port state */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = rport_nport_id; + /* --->>> PLOGI_WAIT */ + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Start to process PLOGI ACC */ + unf_rcv_plogi_acc(lport, rport, login_parms); + } else { + /* Case for PLOGI RJT: L_Port or R_Port recovery */ + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x)<---RPort(0x%p) with LS_RJT(DID:0x%x SID:0x%x) for PLOGI", + lport->port_id, rport, lport->nport_id, + rport->nport_id); + + if (rport->nport_id >= UNF_FC_FID_DOM_MGR) + /* for Name server */ + unf_lport_error_recovery(lport); + else + unf_rport_error_recovery(rport); + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: PLOGI response(0x%x). 
Port(0x%x_0x%x)<---RPort(0x%x_0x%p) wwpn(0x%llx) OX_ID(0x%x)", + cmnd, lport->port_id, lport->nport_id, rport->nport_id, + rport, port_name, xchg->ox_id); +} + +static int unf_rcv_plogi_acc_async_callback(void *v_argc_in, void *v_argc_out) +{ + struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_argc_in; + + UNF_CHECK_VALID(0x2267, UNF_TRUE, xchg, return UNF_RETURN_ERROR); + + unf_plogi_acc_com_process(xchg); + + unf_xchg_ref_dec(xchg, SFS_RESPONSE); + + return RETURN_OK; +} + +static void unf_plogi_callback(void *v_lport, void *v_rport, void *v_xchg) +{ + struct unf_lport_s *lport = (struct unf_lport_s *)v_lport; + struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_xchg; + struct unf_plogi_payload_s *plogi_pld = NULL; + struct unf_lgn_parms_s *login_parms = NULL; + int bbscn_enabled = UNF_FALSE; + int switch_2_thread = UNF_FALSE; + + UNF_CHECK_VALID(0x3662, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3663, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3664, UNF_TRUE, v_xchg, return); + UNF_CHECK_VALID(0x3665, UNF_TRUE, + xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr, + return); + + plogi_pld = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->plogi_acc.payload; + login_parms = &plogi_pld->parms; + xchg->lport = v_lport; + + if (xchg->byte_orders & UNF_BIT_2) + unf_big_end_to_cpu((unsigned char *)plogi_pld, + sizeof(struct unf_plogi_payload_s)); + + bbscn_enabled = unf_check_bbscn_is_enabled( + (unsigned char)lport->low_level_func.lport_cfg_items.bb_scn, + (unsigned char)UNF_GET_BB_SC_N_FROM_PARAMS(login_parms)); + if ((bbscn_enabled == UNF_TRUE) && + (lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT)) { + switch_2_thread = UNF_TRUE; + lport->b_bbscn_support = UNF_TRUE; + } + + if ((switch_2_thread == UNF_TRUE) && (lport->root_lport == lport)) { + /* Wait for LR done sync: just for ROOT Port */ + (void)unf_irq_process_switch_2_thread( + lport, xchg, + unf_rcv_plogi_acc_async_callback); + } else { + unf_plogi_acc_com_process(xchg); + } +} + +static void unf_process_logo_in_pri_loop(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* Send PLOGI or LOGO */ + struct unf_rport_s *rport = v_rport; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3666, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3667, UNF_TRUE, v_rport, return); + + spin_lock_irqsave(&rport->rport_state_lock, flag); + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); /* PLOGI WAIT */ + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Private Loop with INI mode, Avoid COM Mode problem */ + unf_rport_delay_login(rport); +} + +static void unf_process_logo_in_n2n(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* Send PLOGI or LOGO */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = v_rport; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3668, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3669, UNF_TRUE, v_rport, return); + + spin_lock_irqsave(&rport->rport_state_lock, flag); + + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); // PLOGI WAIT + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + if (lport->port_name > rport->port_name) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port(0x%x)'s WWN(0x%llx) is larger than(0x%llx), should be master", + lport->port_id, lport->port_name, + rport->port_name); + + ret = unf_send_plogi(lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]LOGIN: Port(0x%x) send PLOGI 
failed, enter recovery", + v_lport->port_id); + + unf_rport_error_recovery(rport); + } + } else { + unf_rport_enter_logo(lport, rport); + } +} + +void unf_process_logo_in_fabric(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* Send GFF_ID or LOGO */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = v_rport; + struct unf_rport_s *sns_port = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3670, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3671, UNF_TRUE, v_rport, return); + + /* L_Port with INI Mode: Send GFF_ID */ + sns_port = unf_get_rport_by_nport_id(lport, UNF_FC_FID_DIR_SERV); + if (!sns_port) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) can't find fabric port", + lport->port_id); + return; + } + + ret = unf_get_and_post_disc_event(v_lport, sns_port, rport->nport_id, + UNF_DISC_GET_FEATURE); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", + lport->port_id, UNF_DISC_GET_FEATURE, + rport->nport_id); + + unf_rcv_gff_id_rsp_unknown(lport, rport->nport_id); + } +} + +static void unf_process_rport_after_logo(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* + * 1. LOGO handler + * 2. RPLO handler + * 3. LOGO_CALL_BACK (send LOGO ACC) handler + */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = v_rport; + + UNF_CHECK_VALID(0x3672, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3673, UNF_TRUE, v_rport, return); + + if (rport->nport_id < UNF_FC_FID_DOM_MGR) { + /* R_Port is not fabric port (retry LOGIN or LOGO) */ + if (lport->en_act_topo == UNF_ACT_TOP_PRIVATE_LOOP) { + /* Private Loop: PLOGI or LOGO */ + unf_process_logo_in_pri_loop(lport, rport); + } else if (lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT) { + /* Point to Point: LOGIN or LOGO */ + unf_process_logo_in_n2n(lport, rport); + } else { + /* Fabric or Public Loop: GFF_ID or LOGO */ + unf_process_logo_in_fabric(lport, rport); + } + } else { + /* Rport is fabric port: link down now */ + unf_rport_linkdown(lport, rport); + } +} + +static unsigned int unf_rcv_bls_req_done(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_pkg) +{ + /* + * About I/O resource: + * 1. normal: Release I/O resource during RRQ processer + * 2. exception: Release I/O resource immediately + */ + struct unf_xchg_s *xchg = NULL; + unsigned short hot_pool_tag = 0; + unsigned long flags = 0; + unsigned long time_ms = 0; + unsigned int ret = RETURN_OK; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x3723, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3724, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR); + lport = v_lport; + + /* 1. BLS Request Response: Hot Pool Tag --->>> OX_ID */ + hot_pool_tag = + (unsigned short)v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]; + xchg = (struct unf_xchg_s *)unf_cm_lookup_xchg_by_tag( + (void *)lport, hot_pool_tag); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) can't find exchange by tag(0x%x) when receiving ABTS response", + lport->port_id, hot_pool_tag); + + /* return directly */ + return UNF_RETURN_ERROR; + } + + /* Consistency check */ + UNF_CHECK_ALLOCTIME_VALID(v_lport, hot_pool_tag, xchg, + v_pkg->private[PKG_PRIVATE_XCHG_ALLOC_TIME], + xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]); + + /* 2. 
Increase ref_cnt for exchange protecting */ + + ret = unf_xchg_ref_inc(xchg, TGT_ABTS_DONE); /* hold */ + UNF_CHECK_VALID(0x3725, UNF_TRUE, (ret == RETURN_OK), + return UNF_RETURN_ERROR); + + /* 3. Exchag I/O State Set & Check: reused */ + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + xchg->io_state |= INI_IO_STATE_DONE; /* I/O Done */ + xchg->abts_state |= ABTS_RESPONSE_RECEIVED; + if (!(xchg->io_state & INI_IO_STATE_UPABORT)) { + /* NOTE: I/O exchange has been released and used again */ + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) SID(0x%x) exch(0x%p) (0x%x:0x%x:0x%x:0x%x) state(0x%x) is abnormal with cnt(0x%x)", + lport->port_id, lport->nport_id, + xchg->sid, xchg, xchg->hot_pool_tag, + xchg->ox_id, xchg->rx_id, xchg->oid, + xchg->io_state, + atomic_read(&xchg->ref_cnt)); + + /* return directly */ + /* cancel ref & do nothing */ + unf_xchg_ref_dec(xchg, TGT_ABTS_DONE); + return UNF_RETURN_ERROR; + } + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + /* 4. Exchange Timer check, cancel if necessary */ + lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)xchg); + + /* + * 5. Exchage I/O Status check: Succ-> Add RRQ Timer + * ***** pkg->status --- to --->>> scsi_cmnd->result ***** + * + * FAILED: ERR_Code or X_ID is err, or BA_RSP type is err + */ + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + if (v_pkg->status == UNF_IO_SUCCESS) { + /* Succeed: PKG status -->> EXCH status -->> scsi status */ + UNF_SET_SCSI_CMND_RESULT(xchg, UNF_IO_SUCCESS); + xchg->io_state |= INI_IO_STATE_WAIT_RRQ; + xchg->rx_id = UNF_GET_RXID(v_pkg); + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + /* Add RRQ timer */ + time_ms = (unsigned long)(lport->ra_tov); + lport->xchg_mgr_temp.pfn_unf_xchg_add_timer( + (void *)xchg, + time_ms, + UNF_TIMER_TYPE_INI_RRQ); + } else { + /* Failed: PKG status -->> EXCH status -->> scsi status */ + UNF_SET_SCSI_CMND_RESULT(xchg, UNF_IO_FAILED); + if (MARKER_STS_RECEIVED & xchg->abts_state) { + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + /* NOTE: release I/O resource immediately */ + unf_cm_free_xchg(lport, xchg); + } else { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) exch(0x%p) OX_RX(0x%x:0x%x) IOstate(0x%x) ABTSstate(0x%x) receive response abnormal ref(0x%x)", + lport->port_id, xchg, xchg->ox_id, + xchg->rx_id, + xchg->io_state, xchg->abts_state, + atomic_read(&xchg->ref_cnt)); + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + } + } + + /* + * 6. If abts response arrived before + * marker sts received just wake up abts marker sema + */ + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + if (!(xchg->abts_state & MARKER_STS_RECEIVED)) { + xchg->ucode_abts_state = v_pkg->status; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + /* NOTE: wake up semaphore */ + up(&xchg->task_sema); + } else { + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + } + + /* 7. 
dec exch ref_cnt */ + unf_xchg_ref_dec(xchg, TGT_ABTS_DONE); + return ret; +} + +static unsigned int unf_rcv_abort_ini_io_done(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_pkg) +{ + /* INI mode: do not care */ + struct unf_xchg_s *io_xchg = NULL; + unsigned short io_pool_tag = 0; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3735, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3736, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR); + + io_pool_tag = UNF_GET_IO_XCHG_TAG(v_pkg); + io_xchg = (struct unf_xchg_s *)unf_cm_lookup_xchg_by_tag( + (void *)v_lport, + io_pool_tag); + if (io_xchg) { + UNF_CHECK_ALLOCTIME_VALID( + v_lport, io_pool_tag, io_xchg, + v_pkg->private[PKG_PRIVATE_XCHG_ALLOC_TIME], + io_xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]); + + /* 1. Timer release */ + v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer( + (void *)io_xchg); + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]Port(0x%x) abort INI IO with status(0x%x) exchange(0x%p) tag(0x%x)", + v_lport->port_id, v_pkg->status, + io_xchg, io_pool_tag); + + /* 2. Free I/O Exchange context */ + unf_cm_free_xchg((void *)v_lport, (void *)io_xchg); + } + + return ret; +} + +unsigned int unf_receive_bls_pkg(void *v_lport, struct unf_frame_pkg_s *v_pkg) +{ + struct unf_lport_s *lport = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + lport = (struct unf_lport_s *)v_lport; + UNF_CHECK_VALID(0x3730, UNF_TRUE, lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3731, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR); + + if (v_pkg->type == UNF_PKG_BLS_REQ_DONE) { + /* INI: RCVD BLS Req Done */ + ret = unf_rcv_bls_req_done(v_lport, v_pkg); + } else if (v_pkg->type == UNF_PKG_INI_IO) { + /* INI: Abort Done (do not care) */ + ret = unf_rcv_abort_ini_io_done(v_lport, v_pkg); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) received BLS packet type(%xh) is error", + lport->port_id, v_pkg->type); + + return UNF_RETURN_ERROR; + } + + UNF_REFERNCE_VAR(lport); + + return ret; +} + +static void unf_fill_rls_acc_pld(struct unf_rls_acc_s *v_rls_acc, + struct unf_lport_s *v_lport) +{ + struct unf_rls_acc_payload_s *rls_acc_pld = NULL; + + rls_acc_pld = &v_rls_acc->rls; + rls_acc_pld->cmnd = UNF_ELS_CMND_ACC; + + rls_acc_pld->link_failure_count = + v_lport->err_code_sum.link_fail_count; + rls_acc_pld->loss_of_sync_count = + v_lport->err_code_sum.loss_of_sync_count; + rls_acc_pld->loss_of_signal_count = + v_lport->err_code_sum.loss_of_signal_count; + rls_acc_pld->primitive_seq_count = 0; + rls_acc_pld->invalid_trans_word_count = 0; + rls_acc_pld->invalid_crc_count = + v_lport->err_code_sum.bad_crc_count; +} + +static unsigned int unf_send_rls_acc(struct unf_lport_s *v_lport, + unsigned int v_did, + struct unf_xchg_s *v_xchg) +{ + struct unf_rls_acc_s *rls_acc = NULL; + union unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + struct unf_frame_pkg_s pkg; + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + v_xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_RLS); + v_xchg->did = v_did; + v_xchg->sid = v_lport->nport_id; + v_xchg->oid = v_xchg->sid; + v_xchg->lport = v_lport; + + v_xchg->pfn_callback = NULL; + v_xchg->pfn_ob_callback = NULL; + + unf_fill_package(&pkg, v_xchg, v_xchg->rport); + + fc_entry = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + 
v_lport->port_id, v_xchg->hot_pool_tag); + return UNF_RETURN_ERROR; + } + + rls_acc = &fc_entry->rls_acc; + unf_fill_rls_acc_pld(rls_acc, v_lport); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + + ret = unf_els_cmnd_send(v_lport, &pkg, v_xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) send Rls acc %s to RPort(0x%x) with OX_ID(0x%x) RX_ID(0x%x).", + v_lport->port_id, (ret != RETURN_OK) ? "failed" : "succeed", + v_did, ox_id, rx_id); + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static unsigned int unf_rls_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg) +{ + struct unf_rport_s *rport = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3483, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3484, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + UNF_SERVICE_COLLECT(v_lport->link_service_info, UNF_SERVICE_ITEM_RLS); + + rport = unf_get_rport_by_nport_id(v_lport, v_sid); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn] Port(0x%x_0x%x) can`t find RPort by sid(0x%x) OX_ID(0x%x)", + v_lport->port_id, v_lport->nport_id, v_sid, + v_xchg->ox_id); + unf_cm_free_xchg(v_lport, v_xchg); + return ret; + } + v_xchg->rport = rport; + + ret = unf_send_rls_acc(v_lport, v_sid, v_xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) send RLS ACC failed", + v_lport->port_id); + unf_cm_free_xchg(v_lport, v_xchg); + } + + return ret; +} + diff --git a/drivers/scsi/huawei/hifc/unf_service.h b/drivers/scsi/huawei/hifc/unf_service.h new file mode 100644 index 000000000000..868723128575 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_service.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __UNF_SERVICE_H__ +#define __UNF_SERVICE_H__ + +extern unsigned int max_frame_size; + +#define UNF_SET_ELS_ACC_TYPE(v_els_cmd) \ + ((unsigned int)(v_els_cmd) << 16 | ELS_ACC) +#define UNF_SET_ELS_RJT_TYPE(v_els_cmd) \ + ((unsigned int)(v_els_cmd) << 16 | ELS_RJT) + +unsigned int unf_send_gid_ft(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_send_gid_pt(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_send_gpn_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_sns_port, + unsigned int v_nport_id); +unsigned int unf_send_gnn_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_sns_port, + unsigned int v_nport_id); +unsigned int unf_send_gff_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_sns_port, + unsigned int v_nport_id); +unsigned int unf_send_flogi(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_send_fdisc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_send_plogi(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_send_prli(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_receive_els_pkg(void *v_lport, + struct unf_frame_pkg_s *v_fra_pkg); +unsigned int unf_send_rff_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_send_rft_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_send_logo(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_send_echo(struct unf_lport_s *v_lport, + 
struct unf_rport_s *v_rport, + unsigned int *v_time); +unsigned int unf_send_abts(struct unf_lport_s *v_lport, + struct unf_xchg_s *v_xchg); +unsigned int unf_send_scr(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_send_rrq(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg); +void unf_rport_immediate_linkdown(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_receive_bls_pkg(void *v_lport, + struct unf_frame_pkg_s *v_pkg); +struct unf_rport_s *unf_find_rport(struct unf_lport_s *v_lport, + unsigned int v_rport_nport_id, + unsigned long long v_port_name); +void unf_login_with_loop_node(struct unf_lport_s *v_lport, unsigned int alpa); +unsigned int unf_receive_gs_pkg(void *v_lport, + struct unf_frame_pkg_s *v_fra_pkg); +void unf_rcv_gnn_id_rsp_unknown(struct unf_lport_s *v_lport, + struct unf_rport_s *v_sns_port, + unsigned int v_nport_id); +void unf_rcv_gpn_id_rsp_unknown(struct unf_lport_s *v_lport, + unsigned int v_nport_id); +void unf_rcv_gff_id_rsp_unknown(struct unf_lport_s *v_lport, + unsigned int v_nport_id); +unsigned int unf_release_rport_res(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); + +unsigned int unf_low_level_bbscn(struct unf_lport_s *v_lport); +unsigned int unf_send_els_done(void *v_lport, struct unf_frame_pkg_s *v_pkg); +unsigned int unf_send_rec(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg); + +typedef int (*unf_evt_task)(void *v_arg_in, void *v_arg_out); + +#endif /* __UNF_SERVICE_H__ */
From: Chenguangli chenguangli2@huawei.com
driver inclusion category: feature bugzilla: NA
-----------------------------------------------------------------------
This patch adds the HiFC driver Kconfig options to the arm64 and x86 defconfig files to control whether the HiFC driver module is compiled.
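For kernel configurations that are not covered by the defconfig changes below, the new options can also be switched on by hand before building. A minimal sketch, assuming the HiFC Kconfig entries introduced by the driver patches are already present in the tree:

  # enable both HiFC options as modules in the current .config
  scripts/config --module SCSI_HUAWEI_FC
  scripts/config --module SCSI_FC_HIFC
  # resolve any newly exposed dependencies with their defaults
  make olddefconfig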
Signed-off-by: Chenguangli chenguangli2@huawei.com Reviewed-by: Zengweiliang zengweiliang.zengweiliang@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- arch/arm64/configs/euleros_defconfig | 1 + arch/arm64/configs/hulk_defconfig | 2 ++ arch/arm64/configs/openeuler_defconfig | 2 ++ arch/x86/configs/hulk_defconfig | 2 ++ arch/x86/configs/openeuler_defconfig | 2 ++ 5 files changed, 9 insertions(+)
diff --git a/arch/arm64/configs/euleros_defconfig b/arch/arm64/configs/euleros_defconfig index 2762ee8afe05..4d0a652d02e7 100644 --- a/arch/arm64/configs/euleros_defconfig +++ b/arch/arm64/configs/euleros_defconfig @@ -2162,6 +2162,7 @@ CONFIG_SCSI_QLA_FC=m CONFIG_SCSI_QLA_ISCSI=m CONFIG_QEDI=m CONFIG_QEDF=m +# CONFIG_SCSI_HUAWEI_FC is not set CONFIG_SCSI_LPFC=m # CONFIG_SCSI_LPFC_DEBUG_FS is not set # CONFIG_SCSI_DC395x is not set diff --git a/arch/arm64/configs/hulk_defconfig b/arch/arm64/configs/hulk_defconfig index 4979492bb484..4b259b7561eb 100644 --- a/arch/arm64/configs/hulk_defconfig +++ b/arch/arm64/configs/hulk_defconfig @@ -2160,6 +2160,8 @@ CONFIG_QEDI=m CONFIG_QEDF=m CONFIG_SCSI_LPFC=m # CONFIG_SCSI_LPFC_DEBUG_FS is not set +CONFIG_SCSI_HUAWEI_FC=m +CONFIG_SCSI_FC_HIFC=m # CONFIG_SCSI_DC395x is not set # CONFIG_SCSI_AM53C974 is not set # CONFIG_SCSI_WD719X is not set diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig index b2abedf899b5..ffbd0fba2adc 100644 --- a/arch/arm64/configs/openeuler_defconfig +++ b/arch/arm64/configs/openeuler_defconfig @@ -2173,6 +2173,8 @@ CONFIG_QEDI=m CONFIG_QEDF=m CONFIG_SCSI_LPFC=m # CONFIG_SCSI_LPFC_DEBUG_FS is not set +CONFIG_SCSI_HUAWEI_FC=m +CONFIG_SCSI_FC_HIFC=m # CONFIG_SCSI_DC395x is not set # CONFIG_SCSI_AM53C974 is not set # CONFIG_SCSI_WD719X is not set diff --git a/arch/x86/configs/hulk_defconfig b/arch/x86/configs/hulk_defconfig index 6a1d3307e34e..2820af8d5b9a 100644 --- a/arch/x86/configs/hulk_defconfig +++ b/arch/x86/configs/hulk_defconfig @@ -2216,6 +2216,8 @@ CONFIG_QEDI=m CONFIG_QEDF=m CONFIG_SCSI_LPFC=m # CONFIG_SCSI_LPFC_DEBUG_FS is not set +CONFIG_SCSI_HUAWEI_FC=m +CONFIG_SCSI_FC_HIFC=m # CONFIG_SCSI_DC395x is not set # CONFIG_SCSI_AM53C974 is not set # CONFIG_SCSI_WD719X is not set diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig index 912933ebd981..b1f2e0cafe04 100644 --- a/arch/x86/configs/openeuler_defconfig +++ b/arch/x86/configs/openeuler_defconfig @@ -2214,6 +2214,8 @@ CONFIG_QEDI=m CONFIG_QEDF=m CONFIG_SCSI_LPFC=m # CONFIG_SCSI_LPFC_DEBUG_FS is not set +CONFIG_SCSI_HUAWEI_FC=m +CONFIG_SCSI_FC_HIFC=m # CONFIG_SCSI_DC395x is not set # CONFIG_SCSI_AM53C974 is not set # CONFIG_SCSI_WD719X is not set