From: Chenguangli <chenguangli2@huawei.com>
driver inclusion
category: feature
bugzilla: NA
-----------------------------------------------------------------------
This module manages the hifc driver's port resources, including the HBA, Lport, Rport, queues, and NPIV.
Signed-off-by: Chenguangli <chenguangli2@huawei.com>
Reviewed-by: Zengweiliang <zengweiliang.zengweiliang@huawei.com>
Acked-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 drivers/scsi/huawei/hifc/hifc_chipitf.c | 2095 +++++++
 drivers/scsi/huawei/hifc/hifc_chipitf.h |  643 +++
 drivers/scsi/huawei/hifc/hifc_hba.c     | 1627 ++++++
 drivers/scsi/huawei/hifc/hifc_hba.h     |  234 +
 drivers/scsi/huawei/hifc/hifc_queue.c   | 7020 +++++++++++++++++++++++
 drivers/scsi/huawei/hifc/hifc_queue.h   | 1363 +++++
 drivers/scsi/huawei/hifc/unf_lport.c    | 1129 ++++
 drivers/scsi/huawei/hifc/unf_lport.h    |  569 ++
 drivers/scsi/huawei/hifc/unf_npiv.c     | 1481 +++++
 drivers/scsi/huawei/hifc/unf_npiv.h     |   50 +
 drivers/scsi/huawei/hifc/unf_rport.c    | 2430 ++++++++
 drivers/scsi/huawei/hifc/unf_rport.h    |  285 +
 12 files changed, 18926 insertions(+)
 create mode 100644 drivers/scsi/huawei/hifc/hifc_chipitf.c
 create mode 100644 drivers/scsi/huawei/hifc/hifc_chipitf.h
 create mode 100644 drivers/scsi/huawei/hifc/hifc_hba.c
 create mode 100644 drivers/scsi/huawei/hifc/hifc_hba.h
 create mode 100644 drivers/scsi/huawei/hifc/hifc_queue.c
 create mode 100644 drivers/scsi/huawei/hifc/hifc_queue.h
 create mode 100644 drivers/scsi/huawei/hifc/unf_lport.c
 create mode 100644 drivers/scsi/huawei/hifc/unf_lport.h
 create mode 100644 drivers/scsi/huawei/hifc/unf_npiv.c
 create mode 100644 drivers/scsi/huawei/hifc/unf_npiv.h
 create mode 100644 drivers/scsi/huawei/hifc/unf_rport.c
 create mode 100644 drivers/scsi/huawei/hifc/unf_rport.h
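Reviewer note: the chipitf code below follows one request/response pattern throughout: fill an inbox command that starts with a typed mailbox header, send it with hifc_mb_send_and_wait_mbox(), then check both the returned status and the matching *_STS command type before consuming the payload, freeing the out-mbox buffer on every exit path. The following is a minimal userspace sketch of that flow; the types, macros, and the send_and_wait_mbox() stub are illustrative stand-ins, not the driver's real definitions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for the driver's mailbox structures (illustrative only). */
struct mbox_header {
	uint16_t cmnd_type;	/* request type, or *_STS type in the response */
	uint16_t length;	/* payload length in dwords */
};

struct get_chip_info_cmd {
	struct mbox_header header;
};

struct get_chip_info_sts {
	struct mbox_header header;
	uint32_t status;	/* 0 on success, like RETURN_OK in the driver */
	uint64_t wwpn;
	uint64_t wwnn;
};

#define MBOX_GET_CHIP_INFO	0x01	/* hypothetical command codes */
#define MBOX_GET_CHIP_INFO_STS	0x81

/* Stand-in for hifc_mb_send_and_wait_mbox(): send the command and block
 * until the response buffer is filled.
 */
static int send_and_wait_mbox(const void *cmd, size_t len, void *rsp);

static int get_chip_info(uint64_t *wwpn, uint64_t *wwnn)
{
	struct get_chip_info_cmd cmd;
	struct get_chip_info_sts sts;

	memset(&cmd, 0, sizeof(cmd));
	memset(&sts, 0, sizeof(sts));
	cmd.header.cmnd_type = MBOX_GET_CHIP_INFO;
	cmd.header.length = sizeof(cmd) / 4;	/* bytes -> dwords */

	if (send_and_wait_mbox(&cmd, sizeof(cmd), &sts) != 0)
		return -1;			/* send/wait failed */
	if (sts.status != 0)
		return -1;			/* firmware reported an error */
	if (sts.header.cmnd_type != MBOX_GET_CHIP_INFO_STS)
		return -1;			/* response type mismatch */

	*wwpn = sts.wwpn;
	*wwnn = sts.wwnn;
	return 0;
}

/* Fake transport for the sketch: pretend firmware answered successfully. */
static int send_and_wait_mbox(const void *cmd, size_t len, void *rsp)
{
	const struct mbox_header *hdr = cmd;
	struct get_chip_info_sts *sts = rsp;

	(void)len;
	sts->header.cmnd_type = hdr->cmnd_type | 0x80;	/* *_STS convention in this sketch */
	sts->status = 0;
	sts->wwpn = 0x2100001B32A0B1C2ULL;	/* made-up example WWPN/WWNN */
	sts->wwnn = 0x2000001B32A0B1C2ULL;
	return 0;
}

int main(void)
{
	uint64_t wwpn, wwnn;

	if (get_chip_info(&wwpn, &wwnn) == 0)
		printf("wwpn=0x%016llx wwnn=0x%016llx\n",
		       (unsigned long long)wwpn, (unsigned long long)wwnn);
	return 0;
}

In the real driver the response buffer is a kmalloc'ed union hifc_outmbox_generic_u and each wrapper releases it through a shared "exit" label, which is why every mailbox helper in hifc_chipitf.c ends in the same goto-exit cleanup block.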
diff --git a/drivers/scsi/huawei/hifc/hifc_chipitf.c b/drivers/scsi/huawei/hifc/hifc_chipitf.c new file mode 100644 index 000000000000..fe97f6468557 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_chipitf.c @@ -0,0 +1,2095 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Fabric Channel Linux driver + * Copyright(c) 2018 Huawei Technologies Co., Ltd + * + */ + +#include "unf_common.h" +#include "hifc_chipitf.h" + +#define HIFC_MBOX_TIME_SEC_MAX 60 + +#define HIFC_LINK_UP_COUNT 1 +#define HIFC_LINK_DOWN_COUNT 2 +#define HIFC_FC_DELETE_CMND_COUNT 3 + +#define HIFC_MBX_MAX_TIMEOUT 10000 + +static unsigned int hifc_recv_fc_link_up(struct hifc_hba_s *v_hba, + void *v_buf_in); +static unsigned int hifc_recv_fc_link_down(struct hifc_hba_s *v_hba, + void *v_buf_in); +static unsigned int hifc_recv_fc_del_cmd(struct hifc_hba_s *v_hba, + void *v_buf_in); +static unsigned int hifc_recv_fc_error(struct hifc_hba_s *v_hba, + void *v_buf_in); + +static struct hifc_up_2_drv_msg_handle_s up_msg_handle[] = { + { HIFC_MBOX_RECV_FC_LINKUP, hifc_recv_fc_link_up }, + { HIFC_MBOX_RECV_FC_LINKDOWN, hifc_recv_fc_link_down }, + { HIFC_MBOX_RECV_FC_DELCMD, hifc_recv_fc_del_cmd }, + { HIFC_MBOX_RECV_FC_ERROR, hifc_recv_fc_error } +}; + +void hifc_up_msg_2_driver_proc(void *v_hwdev_handle, void *v_pri_handle, + unsigned char v_cmd, void *v_buf_in, + unsigned short v_in_size, void *v_buf_out, + unsigned short *v_out_size) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int index = 0; + struct hifc_hba_s *hba = NULL; + struct hifc_mbox_header_s *mbx_header = NULL; + + HIFC_CHECK(INVALID_VALUE32, v_hwdev_handle, return); + HIFC_CHECK(INVALID_VALUE32, v_pri_handle, return); + HIFC_CHECK(INVALID_VALUE32, v_buf_in, return); + HIFC_CHECK(INVALID_VALUE32, v_buf_out, return); + HIFC_CHECK(INVALID_VALUE32, v_out_size, return); + + hba = (struct hifc_hba_s *)v_pri_handle; + if (!hba) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EVENT, UNF_ERR, + "[err]Hba is null"); + + return; + } + + mbx_header = (struct hifc_mbox_header_s *)v_buf_in; + if (mbx_header->cmnd_type != v_cmd) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EVENT, UNF_ERR, + "[err]Port(0x%x) cmd(0x%x) is not matched with header cmd type(0x%x)", + hba->port_cfg.port_id, v_cmd, + mbx_header->cmnd_type); + return; + } + + while (index < (sizeof(up_msg_handle) / + sizeof(struct hifc_up_2_drv_msg_handle_s))) { + if ((v_cmd == up_msg_handle[index].cmd) && + (up_msg_handle[index].pfn_hifc_msg_up2drv_handler)) { + ret = + up_msg_handle[index].pfn_hifc_msg_up2drv_handler( + hba, + v_buf_in); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EVENT, + UNF_ERR, + "[warn]Port(0x%x) process up cmd(0x%x) failed", + hba->port_cfg.port_id, v_cmd); + } + + /* Process Done & return */ + return; + } + index++; + } + + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EVENT, UNF_ERR, + "[err]Port(0x%x) process up cmd(0x%x) failed", + hba->port_cfg.port_id, v_cmd); + + PRINT_OUTBOUND_IOB(UNF_MAJOR, v_buf_in, ((unsigned int)v_in_size)); +} + +unsigned int hifc_get_chip_msg(void *v_hba, void *v_mac) +{ + struct hifc_hba_s *hba = NULL; + struct unf_get_chip_info_argout *wwn = NULL; + struct hifc_inbox_get_chip_info_s get_chip_info; + union hifc_outmbox_generic_u *chip_info_sts = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_mac, return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)v_hba; + wwn = (struct unf_get_chip_info_argout *)v_mac; + + memset(&get_chip_info, 0, sizeof(struct 
hifc_inbox_get_chip_info_s)); + + chip_info_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!chip_info_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(chip_info_sts, 0, sizeof(union hifc_outmbox_generic_u)); + + get_chip_info.header.cmnd_type = HIFC_MBOX_GET_CHIP_INFO; + get_chip_info.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inbox_get_chip_info_s)); + + if (hifc_mb_send_and_wait_mbox(hba, &get_chip_info, + sizeof(get_chip_info), chip_info_sts) != + RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "hifc can't send and wait mailbox, command type: 0x%x.", + get_chip_info.header.cmnd_type); + + goto exit; + } + + if (chip_info_sts->get_chip_info_sts.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) mailbox status incorrect status(0x%x) .", + hba->port_cfg.port_id, + chip_info_sts->get_chip_info_sts.status); + + goto exit; + } + + if (chip_info_sts->get_chip_info_sts.header.cmnd_type != + HIFC_MBOX_GET_CHIP_INFO_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) receive mailbox type incorrect type: 0x%x.", + hba->port_cfg.port_id, + chip_info_sts->get_chip_info_sts.header.cmnd_type); + + goto exit; + } + + wwn->board_type = chip_info_sts->get_chip_info_sts.board_type; + hba->card_info.card_type = chip_info_sts->get_chip_info_sts.board_type; + wwn->wwpn = chip_info_sts->get_chip_info_sts.wwpn; + wwn->wwnn = chip_info_sts->get_chip_info_sts.wwnn; + wwn->sys_mac = chip_info_sts->get_chip_info_sts.sys_mac; + + ret = RETURN_OK; +exit: + kfree(chip_info_sts); + return ret; +} + +unsigned int hifc_get_chip_capability(void *hw_dev_handle, + struct hifc_chip_info_s *v_chip_info) +{ + struct hifc_inbox_get_chip_info_s get_chip_info; + union hifc_outmbox_generic_u *chip_info_sts = NULL; + unsigned short out_size = 0; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, hw_dev_handle, return UNF_RETURN_ERROR); + + memset(&get_chip_info, 0, sizeof(struct hifc_inbox_get_chip_info_s)); + + chip_info_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!chip_info_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(chip_info_sts, 0, sizeof(union hifc_outmbox_generic_u)); + + get_chip_info.header.cmnd_type = HIFC_MBOX_GET_CHIP_INFO; + get_chip_info.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inbox_get_chip_info_s)); + out_size = sizeof(union hifc_outmbox_generic_u); + + if (hifc_msg_to_mgmt_sync(hw_dev_handle, HIFC_MOD_FC, + HIFC_MBOX_GET_CHIP_INFO, + (void *)&get_chip_info.header, + sizeof(struct hifc_inbox_get_chip_info_s), + (union hifc_outmbox_generic_u *)chip_info_sts, + &out_size, + (HIFC_MBX_MAX_TIMEOUT)) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "hifc can't send and wait mailbox, command type: 0x%x.", + HIFC_MBOX_GET_CHIP_INFO); + + goto exit; + } + + if (chip_info_sts->get_chip_info_sts.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port mailbox status incorrect status(0x%x) .", + chip_info_sts->get_chip_info_sts.status); + + goto exit; + } + + if (chip_info_sts->get_chip_info_sts.header.cmnd_type != + HIFC_MBOX_GET_CHIP_INFO_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port receive mailbox type incorrect type: 0x%x.", + 
chip_info_sts->get_chip_info_sts.header.cmnd_type); + + goto exit; + } + + v_chip_info->wwnn = chip_info_sts->get_chip_info_sts.wwnn; + v_chip_info->wwpn = chip_info_sts->get_chip_info_sts.wwpn; + v_chip_info->tape_support = (unsigned char) + chip_info_sts->get_chip_info_sts.tape_support; + ret = RETURN_OK; +exit: + kfree(chip_info_sts); + return ret; +} + +void hifc_get_red_info_by_rw_type(struct unf_rw_reg_param_s *param, + struct hifc_inmbox_get_reg_info_s *v_reg_info) +{ + if ((param->rw_type == UNF_READ) || + (param->rw_type == UNF_READ_64)) { + v_reg_info->op_code = 0; + } else if ((param->rw_type == UNF_WRITE) || + (param->rw_type == UNF_WRITE_64)) { + v_reg_info->op_code = 1; + } + + if ((param->rw_type == UNF_READ) || + (param->rw_type == UNF_WRITE)) { + v_reg_info->reg_len = 32; + } else if ((param->rw_type == UNF_READ_64) || + (param->rw_type == UNF_WRITE_64)) { + v_reg_info->reg_len = 64; + } +} + +unsigned int hifc_rw_reg(void *v_hba, void *v_params) +{ + struct hifc_hba_s *hba = NULL; + struct unf_rw_reg_param_s *param = NULL; + struct hifc_inmbox_get_reg_info_s reg_info; + union hifc_outmbox_generic_u *reg_info_sts = NULL; + unsigned int para_value_out_l = 0; + unsigned int para_value_out_h = 0; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_params, return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)v_hba; + param = (struct unf_rw_reg_param_s *)v_params; + + memset(®_info, 0, sizeof(struct hifc_inmbox_get_reg_info_s)); + reg_info_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!reg_info_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(reg_info_sts, 0, sizeof(union hifc_outmbox_generic_u)); + + hifc_get_red_info_by_rw_type(param, ®_info); + + reg_info.reg_addr = param->offset; + reg_info.reg_value_l32 = (param->value) & VALUEMASK_L; + reg_info.reg_value_h32 = ((param->value) & VALUEMASK_H) >> 32; + + reg_info.header.cmnd_type = HIFC_MBOX_REG_RW_MODE; + reg_info.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_get_reg_info_s)); + + if (hifc_mb_send_and_wait_mbox(hba, ®_info, + sizeof(reg_info), + reg_info_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "hifc can't send and wait mailbox, command type: 0x%x.", + reg_info.header.cmnd_type); + + goto exit; + } + + if (reg_info_sts->get_reg_info_sts.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) mailbox status incorrect status(0x%x) .", + hba->port_cfg.port_id, + reg_info_sts->get_reg_info_sts.status); + + goto exit; + } + + if (reg_info_sts->get_reg_info_sts.header.cmnd_type != + HIFC_MBOX_REG_RW_MODE_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) receive mailbox type incorrect type: 0x%x.", + hba->port_cfg.port_id, + reg_info_sts->get_reg_info_sts.header.cmnd_type); + + goto exit; + } + + para_value_out_l = reg_info_sts->get_reg_info_sts.reg_value_l32; + para_value_out_h = reg_info_sts->get_reg_info_sts.reg_value_h32; + param->value = (unsigned long long)para_value_out_l | + ((unsigned long long)para_value_out_h << 32); + + ret = RETURN_OK; +exit: + kfree(reg_info_sts); + return ret; +} + +unsigned int hifc_config_port_table(struct hifc_hba_s *v_hba) +{ + struct hifc_inbox_config_api_s config_api; + union hifc_outmbox_generic_u *out_mbox = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + 
HIFC_CHECK(INVALID_VALUE32, NULL != v_hba, return UNF_RETURN_ERROR); + + memset(&config_api, 0, sizeof(config_api)); + out_mbox = kmalloc(sizeof(union hifc_outmbox_generic_u), GFP_ATOMIC); + if (!out_mbox) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(out_mbox, 0, sizeof(union hifc_outmbox_generic_u)); + + config_api.header.cmnd_type = HIFC_MBOX_CONFIG_API; + config_api.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inbox_config_api_s)); + + config_api.op_code = UNDEFINEOPCODE; + + /* change switching top cmd of CM to the cmd that up recognize */ + /* if the cmd equals UNF_TOP_P2P_MASK sending in CM means that it + * should be changed into P2P top, LL using HIFC_TOP_NON_LOOP_MASK + */ + if ((unsigned char)v_hba->port_topo_cfg == UNF_TOP_P2P_MASK) { + config_api.topy_mode = 0x2; + /* if the cmd equals UNF_TOP_LOOP_MASK sending in CM means that it + * should be changed into loop top, LL using HIFC_TOP_LOOP_MASK + */ + } else if ((unsigned char)v_hba->port_topo_cfg == UNF_TOP_LOOP_MASK) { + config_api.topy_mode = 0x1; + + /* if the cmd equals UNF_TOP_AUTO_MASK sending in CM means that it + * should be changed into loop top, LL using HIFC_TOP_AUTO_MASK + */ + } else if ((unsigned char)v_hba->port_topo_cfg == UNF_TOP_AUTO_MASK) { + config_api.topy_mode = 0x0; + } else { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) topo cmd is error, command type: 0x%x", + v_hba->port_cfg.port_id, + (unsigned char)v_hba->port_topo_cfg); + + return UNF_RETURN_ERROR; + } + + /* About speed */ + config_api.sfp_speed = (unsigned char)(v_hba->port_speed_cfg); + config_api.max_speed = (unsigned char)(v_hba->max_support_speed); + + config_api.rx_bbcredit_32g = HIFC_LOWLEVEL_DEFAULT_32G_BB_CREDIT; + config_api.rx_bbcredit_16g = HIFC_LOWLEVEL_DEFAULT_16G_BB_CREDIT; + config_api.rx_bbcredit_842g = HIFC_LOWLEVEL_DEFAULT_842G_BB_CREDIT; + config_api.rdy_cnt_bf_fst_frm = HIFC_LOWLEVEL_DEFAULT_LOOP_BB_CREDIT; + config_api.esch_value_32g = HIFC_LOWLEVEL_DEFAULT_32G_ESCH_VALUE; + config_api.esch_value_16g = HIFC_LOWLEVEL_DEFAULT_16G_ESCH_VALUE; + config_api.esch_value_8g = HIFC_LOWLEVEL_DEFAULT_842G_ESCH_VALUE; + config_api.esch_value_4g = HIFC_LOWLEVEL_DEFAULT_842G_ESCH_VALUE; + config_api.esch_value_2g = HIFC_LOWLEVEL_DEFAULT_842G_ESCH_VALUE; + config_api.esch_bust_size = HIFC_LOWLEVEL_DEFAULT_ESCH_BUS_SIZE; + + /* default value:0xFF */ + config_api.hard_alpa = 0xFF; + memcpy(config_api.port_name, v_hba->sys_port_name, UNF_WWN_LEN); + + /* if only for slave, the value is 1; if participate master choosing, + * the value is 0 + */ + config_api.slave = v_hba->port_loop_role; + + /* 1:auto negotiate, 0:fixed mode negotiate */ + if (config_api.sfp_speed == 0) + config_api.auto_sneg = 0x1; + else + config_api.auto_sneg = 0x0; + + /* send & wait */ + if (hifc_mb_send_and_wait_mbox(v_hba, &config_api, + sizeof(config_api), + out_mbox) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[warn]Port(0x%x) HIFC can't send and wait mailbox, command type: 0x%x", + v_hba->port_cfg.port_id, + config_api.header.cmnd_type); + + goto exit; + } + + /* mailbox status check */ + if (out_mbox->config_api_sts.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[err]Port(0x%x) receive mailbox type(0x%x) with status(0x%x) error", + v_hba->port_cfg.port_id, + out_mbox->config_api_sts.header.cmnd_type, + out_mbox->config_api_sts.status); + + goto exit; + } + + 
/* RSP type check */ + if (out_mbox->config_api_sts.header.cmnd_type != + HIFC_MBOX_CONFIG_API_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[err]Port(0x%x) receive mailbox type(0x%x) error", + v_hba->port_cfg.port_id, + out_mbox->config_api_sts.header.cmnd_type); + + goto exit; + } + + ret = RETURN_OK; +exit: + kfree(out_mbox); + return ret; +} + +unsigned int hifc_port_switch(struct hifc_hba_s *v_hba, int turn_on) +{ + struct hifc_inbox_port_switch_s port_switch; + union hifc_outmbox_generic_u *port_switch_sts = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, NULL != v_hba, return UNF_RETURN_ERROR); + + memset(&port_switch, 0, sizeof(port_switch)); + + port_switch_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!port_switch_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(port_switch_sts, 0, sizeof(union hifc_outmbox_generic_u)); + + port_switch.header.cmnd_type = HIFC_MBOX_PORT_SWITCH; + port_switch.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inbox_port_switch_s)); + port_switch.op_code = (unsigned char)turn_on; + port_switch.port_type = (unsigned char)v_hba->port_type; + + /* set the value is 0 first, vn2vf mode, vlan discovery automatically */ + port_switch.host_id = 0; + port_switch.pf_id = + (unsigned char)(hifc_global_func_id(v_hba->hw_dev_handle)); + port_switch.fcoe_mode = HIFC_FIP_MODE_VN2VF; + port_switch.conf_vlan = 0xffff; + port_switch.sys_node_name = *(unsigned long long *)v_hba->sys_node_name; + port_switch.sys_port_wwn = *(unsigned long long *)v_hba->sys_port_name; + + /* send & wait mailbox */ + if (hifc_mb_send_and_wait_mbox(v_hba, &port_switch, sizeof(port_switch), + port_switch_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[warn]Port(0x%x) HIFC can't send and wait mailbox, command type(0x%x) opcode(0x%x)", + v_hba->port_cfg.port_id, + port_switch.header.cmnd_type, port_switch.op_code); + + goto exit; + } + + /* check mailbox rsp status */ + if (port_switch_sts->port_switch_sts.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[err]Port(0x%x) receive mailbox type(0x%x) status(0x%x) error", + v_hba->port_cfg.port_id, + port_switch_sts->port_switch_sts.header.cmnd_type, + port_switch_sts->port_switch_sts.status); + + goto exit; + } + + /* check mailbox rsp type */ + if (port_switch_sts->port_switch_sts.header.cmnd_type != + HIFC_MBOX_PORT_SWITCH_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[err]Port(0x%x) receive mailbox type(0x%x) error", + v_hba->port_cfg.port_id, + port_switch_sts->port_switch_sts.header.cmnd_type); + + goto exit; + } + + HIFC_TRACE(UNF_EVTLOG_LINK_SUC, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "[event]Port(0x%x) switch succeed, turns to %s", + v_hba->port_cfg.port_id, + (turn_on) ? 
"on" : "off"); + + ret = RETURN_OK; +exit: + kfree(port_switch_sts); + return ret; +} + +unsigned int hifc_config_login_api(struct hifc_hba_s *v_hba, + struct unf_port_login_parms_s *v_login_para) +{ +#define HIFC_LOOP_RDYNUM 8 + int async_ret = RETURN_OK; + unsigned int ret = UNF_RETURN_ERROR; + struct hifc_inmbox_config_login_s cfg_login; + union hifc_outmbox_generic_u *cfg_login_sts = NULL; + + HIFC_CHECK(INVALID_VALUE32, NULL != v_hba, return UNF_RETURN_ERROR); + + memset(&cfg_login, 0, sizeof(cfg_login)); + cfg_login_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!cfg_login_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(cfg_login_sts, 0, sizeof(union hifc_outmbox_generic_u)); + + cfg_login.header.cmnd_type = HIFC_MBOX_CONFIG_LOGIN_API; + cfg_login.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_config_login_s)); + cfg_login.header.port_id = v_hba->port_index; + + cfg_login.op_code = UNDEFINEOPCODE; + + cfg_login.tx_bb_credit = v_hba->remote_bbcredit; + + cfg_login.etov = v_hba->compared_edtov_val; + cfg_login.rtov = v_hba->compared_ratov_val; + + cfg_login.rt_tov_tag = v_hba->remote_rttov_tag; + cfg_login.ed_tov_tag = v_hba->remote_edtov_tag; + cfg_login.bb_credit = v_hba->remote_bbcredit; + cfg_login.bbscn = HIFC_LSB(v_hba->compared_bbscn); + + if (cfg_login.bbscn) { + cfg_login.lr_flag = + (v_login_para->els_cmnd_code == ELS_PLOGI) ? 0 : 1; + ret = hifc_mb_send_and_wait_mbox(v_hba, &cfg_login, + sizeof(cfg_login), + cfg_login_sts); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "Port(0x%x) HIFC can't send and wait mailbox, command type: 0x%x.", + v_hba->port_cfg.port_id, + cfg_login.header.cmnd_type); + + goto exit; + } + + if (cfg_login_sts->config_login_sts.header.cmnd_type != + HIFC_MBOX_CONFIG_LOGIN_API_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, + UNF_INFO, "Port(0x%x) Receive mailbox type incorrect. Type: 0x%x.", + v_hba->port_cfg.port_id, + cfg_login_sts->config_login_sts.header.cmnd_type); + + goto exit; + } + + if (cfg_login_sts->config_login_sts.status != STATUS_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, "Port(0x%x) Receive mailbox type(0x%x) status incorrect. 
Status: 0x%x.", + v_hba->port_cfg.port_id, + cfg_login_sts->config_login_sts.header.cmnd_type, + cfg_login_sts->config_login_sts.status); + + goto exit; + } + } else { + async_ret = hifc_msg_to_mgmt_async(v_hba->hw_dev_handle, + HIFC_MOD_FC, + HIFC_MBOX_CONFIG_LOGIN_API, + &cfg_login, + sizeof(cfg_login)); + + if (async_ret != 0) { + HIFC_MAILBOX_STAT(v_hba, + HIFC_SEND_CONFIG_LOGINAPI_FAIL); + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "Port(0x%x) hifc can't send config login cmd to up,ret:%d.", + v_hba->port_cfg.port_id, async_ret); + + goto exit; + } + + HIFC_MAILBOX_STAT(v_hba, HIFC_SEND_CONFIG_LOGINAPI); + } + + HIFC_TRACE(UNF_EVTLOG_LINK_SUC, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "Port(0x%x) Topo(0x%x) Config login param to up: txbbcredit(0x%x), BB_SC_N(0x%x).", + v_hba->port_cfg.port_id, v_hba->active_topo, + cfg_login.tx_bb_credit, cfg_login.bbscn); + + ret = RETURN_OK; +exit: + kfree(cfg_login_sts); + return ret; +} + +unsigned int hifc_mb_send_and_wait_mbox(struct hifc_hba_s *v_hba, + const void *v_in_mbox, + unsigned short in_size, + union hifc_outmbox_generic_u *out_mbox) +{ + void *handle = NULL; + unsigned short out_size = 0; + unsigned long time_out = 0; + int ret = 0; + struct hifc_mbox_header_s *header; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_in_mbox, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, out_mbox, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_hba->hw_dev_handle, + return UNF_RETURN_ERROR); + + header = (struct hifc_mbox_header_s *)v_in_mbox; + out_size = sizeof(union hifc_outmbox_generic_u); + handle = v_hba->hw_dev_handle; + + /* Wait for las mailbox completion: */ + time_out = wait_for_completion_timeout( + &v_hba->mbox_complete, + (unsigned long)msecs_to_jiffies(HIFC_MBOX_TIME_SEC_MAX * 1000)); + if (time_out == UNF_ZERO) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[err]Port(0x%x) wait mailbox(0x%x) completion timeout: %d sec", + v_hba->port_cfg.port_id, header->cmnd_type, + HIFC_MBOX_TIME_SEC_MAX); + + return UNF_RETURN_ERROR; + } + + /* Send Msg to uP Sync: timer 10s */ + ret = hifc_msg_to_mgmt_sync(handle, HIFC_MOD_FC, header->cmnd_type, + (void *)v_in_mbox, in_size, + (union hifc_outmbox_generic_u *)out_mbox, + &out_size, + HIFC_MBX_MAX_TIMEOUT); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[warn]Port(0x%x) can not send mailbox(0x%x) with ret:%d", + v_hba->port_cfg.port_id, header->cmnd_type, ret); + + complete(&v_hba->mbox_complete); + return UNF_RETURN_ERROR; + } + + complete(&v_hba->mbox_complete); + return RETURN_OK; +} + +unsigned short hifc_get_global_base_qpn(void *v_handle) +{ +#define NIC_UP_CMD_GET_GLOBAL_QPN 102 + + int ret = 0; + unsigned short out_size = 0; + struct hifc_get_global_base_qpn_s qpn_base = { 0 }; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_handle, + return INVALID_VALUE16); + qpn_base.func_id = hifc_global_func_id(v_handle); + out_size = (u16)sizeof(struct hifc_get_global_base_qpn_s); + + /* Send Msg to uP Sync: timer 10s */ + ret = hifc_msg_to_mgmt_sync(v_handle, + HIFC_MOD_L2NIC, + NIC_UP_CMD_GET_GLOBAL_QPN, + &qpn_base, + (u16)sizeof(qpn_base), + &qpn_base, + &out_size, + HIFC_MBX_MAX_TIMEOUT); + + if (ret || (!out_size) || qpn_base.status) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[warn]hifc_get_global_base_qpn failed, ret %d, out_size %u, qpn_info.ret%u", + ret, out_size, qpn_base.status); + + return 0xFFFF; + } + + return 
(u16)(qpn_base.base_qpn); +} + +void hifc_initial_dynamic_info(struct hifc_hba_s *v_fc_port) +{ + struct hifc_hba_s *hba = v_fc_port; + unsigned long flag = 0; + + HIFC_CHECK(INVALID_VALUE32, NULL != hba, return); + + spin_lock_irqsave(&hba->hba_lock, flag); + hba->active_port_speed = UNF_PORT_SPEED_UNKNOWN; + hba->active_topo = UNF_ACT_TOP_UNKNOWN; + hba->phy_link = UNF_PORT_LINK_DOWN; + hba->q_set_stage = HIFC_QUEUE_SET_STAGE_INIT; + hba->loop_map_valid = LOOP_MAP_INVALID; + hba->delay_info.srq_delay_flag = 0; + hba->delay_info.root_rq_rcvd_flag = 0; + spin_unlock_irqrestore(&hba->hba_lock, flag); +} + +unsigned int hifc_recv_fc_link_up(struct hifc_hba_s *v_hba, void *v_buf_in) +{ +#define HIFC_LOOP_MASK 0x1 +#define HIFC_LOOPMAP_COUNT 128 + + unsigned int ret = UNF_RETURN_ERROR; + struct hifc_link_event_s *buf_in = NULL; + + buf_in = (struct hifc_link_event_s *)v_buf_in; + v_hba->phy_link = UNF_PORT_LINK_UP; + v_hba->active_port_speed = buf_in->speed; + v_hba->led_states.green_speed_led = + (unsigned char)(buf_in->green_speed_led); + v_hba->led_states.yellow_speed_led = + (unsigned char)(buf_in->yellow_speed_led); + v_hba->led_states.ac_led = (unsigned char)(buf_in->acled); + + if ((buf_in->top_type == HIFC_LOOP_MASK) && + ((buf_in->loop_map_info[1] == UNF_FL_PORT_LOOP_ADDR) || + (buf_in->loop_map_info[2] == UNF_FL_PORT_LOOP_ADDR))) { + v_hba->active_topo = UNF_ACT_TOP_PUBLIC_LOOP; /* Public Loop */ + v_hba->active_al_pa = buf_in->alpa_value; /* AL_PA */ + memcpy(v_hba->loop_map, buf_in->loop_map_info, + HIFC_LOOPMAP_COUNT); + v_hba->loop_map_valid = LOOP_MAP_VALID; + } else if (buf_in->top_type == HIFC_LOOP_MASK) { + v_hba->active_topo = UNF_ACT_TOP_PRIVATE_LOOP;/* Private Loop */ + v_hba->active_al_pa = buf_in->alpa_value; /* AL_PA */ + memcpy(v_hba->loop_map, buf_in->loop_map_info, + HIFC_LOOPMAP_COUNT); + v_hba->loop_map_valid = LOOP_MAP_VALID; + } else { + v_hba->active_topo = UNF_TOP_P2P_MASK; /* P2P_D or P2P_F */ + } + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_EVENT, UNF_KEVENT, + "[event]Port(0x%x) receive link up event(0x%x) with speed(0x%x) uP_topo(0x%x) driver_topo(0x%x)", + v_hba->port_cfg.port_id, buf_in->link_event, + buf_in->speed, buf_in->top_type, v_hba->active_topo); + + /* Set clear & flush state */ + hifc_set_hba_flush_state(v_hba, UNF_FALSE); + hifc_set_root_sq_flush_state(v_hba, UNF_FALSE); + hifc_set_rport_flush_state(v_hba, UNF_FALSE); + + /* Report link up event to COM */ + UNF_LOWLEVEL_PORT_EVENT(ret, v_hba->lport, + UNF_PORT_LINK_UP, &v_hba->active_port_speed); + + HIFC_LINK_EVENT_STAT(v_hba, HIFC_LINK_UP_COUNT); + + return ret; +} + +unsigned int hifc_recv_fc_link_down(struct hifc_hba_s *v_hba, void *v_buf_in) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct hifc_link_event_s *buf_in = NULL; + + buf_in = (struct hifc_link_event_s *)v_buf_in; + + /* 1. Led state setting */ + v_hba->led_states.green_speed_led = + (unsigned char)(buf_in->green_speed_led); + v_hba->led_states.yellow_speed_led = + (unsigned char)(buf_in->yellow_speed_led); + v_hba->led_states.ac_led = (unsigned char)(buf_in->acled); + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_EVENT, UNF_KEVENT, + "[event]Port(0x%x) receive link down event(0x%x) reason(0x%x)", + v_hba->port_cfg.port_id, buf_in->link_event, buf_in->reason); + + hifc_initial_dynamic_info(v_hba); + + /* 2. set HBA flush state */ + hifc_set_hba_flush_state(v_hba, UNF_TRUE); + + /* 3. set Root SQ flush state */ + hifc_set_root_sq_flush_state(v_hba, UNF_TRUE); + + /* 4. 
set R_Port (parent SQ) flush state */ + hifc_set_rport_flush_state(v_hba, UNF_TRUE); + + /* 5. Report link down event to COM */ + UNF_LOWLEVEL_PORT_EVENT(ret, v_hba->lport, UNF_PORT_LINK_DOWN, 0); + + /* DFX setting */ + HIFC_LINK_REASON_STAT(v_hba, buf_in->reason); + HIFC_LINK_EVENT_STAT(v_hba, HIFC_LINK_DOWN_COUNT); + + return ret; +} + +unsigned int hifc_recv_fc_del_cmd(struct hifc_hba_s *v_hba, void *v_buf_in) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct hifc_link_event_s *buf_in = NULL; + + buf_in = (struct hifc_link_event_s *)v_buf_in; + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[event]Port(0x%x) receive delete cmd event(0x%x)", + v_hba->port_cfg.port_id, buf_in->link_event); + + /* Send buffer clear cmnd */ + ret = hifc_clear_fetched_sq_wqe(v_hba); + + v_hba->q_set_stage = HIFC_QUEUE_SET_STAGE_SCANNING; + HIFC_LINK_EVENT_STAT(v_hba, HIFC_FC_DELETE_CMND_COUNT); + + HIFC_REFERNCE_VAR(buf_in, buf_in, ret); + return ret; +} + +unsigned int hifc_recv_fc_error(struct hifc_hba_s *v_hba, void *v_buf_in) +{ +#define FC_ERR_LEVEL_DEAD 0 +#define FC_ERR_LEVEL_HIGH 1 +#define FC_ERR_LEVEL_LOW 2 + + unsigned int ret = UNF_RETURN_ERROR; + struct hifc_up_error_event_s *buf_in = NULL; + + buf_in = (struct hifc_up_error_event_s *)v_buf_in; + if (buf_in->error_type >= HIFC_UP_ERR_BUTT || + buf_in->error_value >= HIFC_ERR_VALUE_BUTT) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) receive a unsupported UP Error Event Type(0x%x) Value(0x%x).", + v_hba->port_cfg.port_id, + buf_in->error_type, + buf_in->error_value); + return ret; + } + + switch (buf_in->error_level) { + case FC_ERR_LEVEL_DEAD: + /* todo: chip reset */ + ret = RETURN_OK; + break; + + case FC_ERR_LEVEL_HIGH: + /* port reset */ + UNF_LOWLEVEL_PORT_EVENT(ret, v_hba->lport, + UNF_PORT_ABNORMAL_RESET, NULL); + break; + + case FC_ERR_LEVEL_LOW: + ret = RETURN_OK; + break; + + default: + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) receive a unsupported UP Error Event Level(0x%x), Can not Process.", + v_hba->port_cfg.port_id, buf_in->error_level); + return ret; + } + if (buf_in->error_value < HIFC_ERR_VALUE_BUTT) + HIFC_UP_ERR_EVENT_STAT(v_hba, buf_in->error_value); + + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_KEVENT, + "[event]Port(0x%x) process UP Error Event Level(0x%x) Type(0x%x) Value(0x%x) %s.", + v_hba->port_cfg.port_id, buf_in->error_level, + buf_in->error_type, buf_in->error_value, + (ret == UNF_RETURN_ERROR) ? 
"ERROR" : "OK"); + + HIFC_REFERNCE_VAR(buf_in, buf_in, ret); + + return ret; +} + +unsigned int hifc_get_topo_cfg(void *v_hba, void *v_topo_cfg) +{ + struct hifc_hba_s *hba = v_hba; + unsigned int *topo_cfg = v_topo_cfg; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_topo_cfg, return UNF_RETURN_ERROR); + + *topo_cfg = hba->port_topo_cfg; + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Get topology config: 0x%x.", + *topo_cfg); + + return RETURN_OK; +} + +unsigned int hifc_get_topo_act(void *v_hba, void *topo_act) +{ + struct hifc_hba_s *hba = v_hba; + enum unf_act_topo_e *ret_topo_act = topo_act; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, topo_act, return UNF_RETURN_ERROR); + + /* Get topo from low_level */ + *ret_topo_act = hba->active_topo; + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Get active topology: 0x%x", + *ret_topo_act); + + return RETURN_OK; +} + +unsigned int hifc_get_loop_alpa(void *v_hba, void *v_alpa) +{ + unsigned long flags = 0; + struct hifc_hba_s *hba = v_hba; + unsigned char *alpa = v_alpa; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_alpa, return UNF_RETURN_ERROR); + + spin_lock_irqsave(&hba->hba_lock, flags); + *alpa = hba->active_al_pa; + spin_unlock_irqrestore(&hba->hba_lock, flags); + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Get active AL_PA(0x%x)", *alpa); + + return RETURN_OK; +} + +unsigned int hifc_get_lport_led(void *v_hba, void *v_led_state) +{ + unsigned int ret = RETURN_OK; + struct hifc_hba_s *hba = v_hba; + struct hifc_led_state_s *led_state = v_led_state; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_led_state, return UNF_RETURN_ERROR); + + led_state->green_speed_led = hba->led_states.green_speed_led; + led_state->yellow_speed_led = hba->led_states.yellow_speed_led; + led_state->ac_led = hba->led_states.ac_led; + + return ret; +} + +unsigned int hifc_get_hardware_version(void *v_fc_port, void *v_version) +{ + struct hifc_hba_s *fc_port = (struct hifc_hba_s *)v_fc_port; + struct unf_version_str_s *version = + (struct unf_version_str_s *)v_version; + char *hard_ware_ver = NULL; + + HIFC_CHECK(INVALID_VALUE32, version, return UNF_RETURN_ERROR); + hard_ware_ver = version->buf; + HIFC_CHECK(INVALID_VALUE32, hard_ware_ver, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, fc_port, return UNF_RETURN_ERROR); + + hard_ware_ver[UNF_HW_VERSION_LEN - 1] = 0; + + return RETURN_OK; +} + +unsigned int hifc_get_sfp_info(void *v_fc_port, void *v_sfp_info) +{ + struct unf_lport_sfp_info *sfp_info = + (struct unf_lport_sfp_info *)v_sfp_info; + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_fc_port; + struct hifc_inmbox_get_sfp_info_s get_sfp_info; + union hifc_outmbox_generic_u *get_sfp_info_sts = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, sfp_info, return UNF_RETURN_ERROR); + + memset(&get_sfp_info, 0, sizeof(get_sfp_info)); + + get_sfp_info_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!get_sfp_info_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(get_sfp_info_sts, 0, sizeof(union hifc_outmbox_generic_u)); + + get_sfp_info.header.cmnd_type = HIFC_MBOX_GET_SFP_INFO; + 
get_sfp_info.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_get_sfp_info_s)); + get_sfp_info.header.port_id = (hba->port_index); + + /* send mailbox and handle the return sts */ + if (hifc_mb_send_and_wait_mbox(hba, &get_sfp_info, sizeof(get_sfp_info), + get_sfp_info_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "Port(0x%x) HIFC can't send and wait mailbox, command type: 0x%x.", + hba->port_cfg.port_id, + get_sfp_info.header.cmnd_type); + + goto exit; + } + + sfp_info->status = get_sfp_info_sts->get_sfp_info_sts.status; + if (get_sfp_info_sts->get_sfp_info_sts.status != STATUS_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "Port(0x%x) Receive mailbox type(0x%x) status incorrect. Status: 0x%x.", + hba->port_cfg.port_id, + get_sfp_info_sts->get_sfp_info_sts.header.cmnd_type, + get_sfp_info_sts->get_sfp_info_sts.status); + + goto exit; + } + + if (get_sfp_info_sts->get_sfp_info_sts.header.cmnd_type != + HIFC_MBOX_GET_SFP_INFO_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "Port(0x%x) Receive mailbox type incorrect. Type: 0x%x.", + hba->port_cfg.port_id, + get_sfp_info_sts->get_sfp_info_sts.header.cmnd_type); + + goto exit; + } + + /* the real sfpinfo is beyond the header of sts */ + memcpy(&sfp_info->sfp_eeprom_info, + ((unsigned char *)get_sfp_info_sts + + sizeof(get_sfp_info_sts->get_sfp_info_sts)), + sizeof(union unf_sfp_eeprome_info)); + + ret = RETURN_OK; +exit: + kfree(get_sfp_info_sts); + return ret; +} + +unsigned int hifc_get_port_info(void *v_hba) +{ + unsigned long flags = 0; + struct hifc_inmbox_get_port_info_s get_port_info; + union hifc_outmbox_generic_u *port_info_sts = NULL; + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba; + unsigned int ret = UNF_RETURN_ERROR; + + memset(&get_port_info, 0, sizeof(get_port_info)); + port_info_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!port_info_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(port_info_sts, 0, sizeof(union hifc_outmbox_generic_u)); + + get_port_info.header.cmnd_type = HIFC_MBOX_GET_PORT_INFO; + get_port_info.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_get_port_info_s)); + get_port_info.header.port_id = hba->port_index; + + if (hifc_mb_send_and_wait_mbox(hba, &get_port_info, + sizeof(get_port_info), port_info_sts) != + RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "Port(0x%x) send and wait mailbox type(0x%x) failed.", + hba->port_cfg.port_id, + get_port_info.header.cmnd_type); + + goto exit; + } + + if ((port_info_sts->get_port_info_sts.status != STATUS_OK) || + (port_info_sts->get_port_info_sts.header.cmnd_type != + HIFC_MBOX_GET_PORT_INFO_STS)) { + HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "Port(0x%x) receive mailbox type(0x%x) status(0x%x) error.", + hba->port_cfg.port_id, + port_info_sts->get_port_info_sts.header.cmnd_type, + port_info_sts->get_port_info_sts.status); + + goto exit; + } + + spin_lock_irqsave(&hba->hba_lock, flags); + hba->active_bb_scn = port_info_sts->get_port_info_sts.bbscn; + hba->active_rx_bb_credit = + port_info_sts->get_port_info_sts.non_loop_rx_credit; + spin_unlock_irqrestore(&hba->hba_lock, flags); + + ret = RETURN_OK; +exit: + kfree(port_info_sts); + return ret; +} + +unsigned int hifc_get_port_current_info(void *v_hba, void *port_info) +{ + struct hifc_hba_s *hba = NULL; + struct 
hifc_inmbox_get_port_info_s get_port_info; + union hifc_outmbox_generic_u *port_info_sts = NULL; + struct unf_get_port_info_argout *current_port_info = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, port_info, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)v_hba; + current_port_info = (struct unf_get_port_info_argout *)port_info; + + memset(&get_port_info, 0, sizeof(get_port_info)); + port_info_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!port_info_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(port_info_sts, 0, sizeof(union hifc_outmbox_generic_u)); + + get_port_info.header.cmnd_type = HIFC_MBOX_GET_PORT_INFO; + get_port_info.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_get_port_info_s)); + get_port_info.header.port_id = hba->port_index; + + if (hifc_mb_send_and_wait_mbox(hba, &get_port_info, + sizeof(get_port_info), + port_info_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Port(0x%x) send and wait mailbox type(0x%x) failed", + hba->port_cfg.port_id, + get_port_info.header.cmnd_type); + + goto exit; + } + + if ((port_info_sts->get_port_info_sts.status != STATUS_OK) || + (port_info_sts->get_port_info_sts.header.cmnd_type != + HIFC_MBOX_GET_PORT_INFO_STS)) { + HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "Port(0x%x) receive mailbox type(0x%x) status(0x%x) error.", + hba->port_cfg.port_id, + port_info_sts->get_port_info_sts.header.cmnd_type, + port_info_sts->get_port_info_sts.status); + + goto exit; + } + + current_port_info->sfp_speed = + (unsigned char)port_info_sts->get_port_info_sts.sfp_speed; + current_port_info->present = + (unsigned char)port_info_sts->get_port_info_sts.present; + + ret = RETURN_OK; +exit: + kfree(port_info_sts); + return ret; +} + +static void hifc_get_fabric_login_params( + struct hifc_hba_s *hba, + struct unf_port_login_parms_s *v_param_addr) +{ + unsigned long flag = 0; + + spin_lock_irqsave(&hba->hba_lock, flag); + hba->active_topo = v_param_addr->en_act_topo; + hba->compared_ratov_val = v_param_addr->compared_ratov_val; + hba->compared_edtov_val = v_param_addr->compared_edtov_val; + hba->compared_bbscn = v_param_addr->compared_bbscn; + hba->remote_edtov_tag = v_param_addr->remote_edtov_tag; + hba->remote_rttov_tag = v_param_addr->remote_rttov_tag; + hba->remote_bbcredit = v_param_addr->remote_bbcredit; + spin_unlock_irqrestore(&hba->hba_lock, flag); + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) topo(0x%x) get fabric params: R_A_TOV(0x%x) E_D_TOV(%u) BB_CREDIT(0x%x) BB_SC_N(0x%x)", + hba->port_cfg.port_id, hba->active_topo, + hba->compared_ratov_val, hba->compared_edtov_val, + hba->remote_bbcredit, hba->compared_bbscn); +} + +static void hifc_get_port_login_params( + struct hifc_hba_s *hba, + struct unf_port_login_parms_s *v_param_addr) +{ + unsigned long flag = 0; + + spin_lock_irqsave(&hba->hba_lock, flag); + hba->compared_ratov_val = v_param_addr->compared_ratov_val; + hba->compared_edtov_val = v_param_addr->compared_edtov_val; + hba->compared_bbscn = v_param_addr->compared_bbscn; + hba->remote_edtov_tag = v_param_addr->remote_edtov_tag; + hba->remote_rttov_tag = v_param_addr->remote_rttov_tag; + hba->remote_bbcredit = v_param_addr->remote_bbcredit; + spin_unlock_irqrestore(&hba->hba_lock, flag); + + 
HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "Port(0x%x) Topo(0x%x) Get Port Params: R_A_TOV(0x%x), E_D_TOV(0x%x), BB_CREDIT(0x%x), BB_SC_N(0x%x).", + hba->port_cfg.port_id, hba->active_topo, + hba->compared_ratov_val, hba->compared_edtov_val, + hba->remote_bbcredit, hba->compared_bbscn); +} + +unsigned int hifc_update_fabric_param(void *v_hba, void *v_para_in) +{ + unsigned int ret = RETURN_OK; + struct hifc_hba_s *hba = v_hba; + struct unf_port_login_parms_s *login_coparms = v_para_in; + + UNF_CHECK_VALID(0x4923, UNF_B_TRUE, hba, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x4924, UNF_B_TRUE, v_para_in, return UNF_RETURN_ERROR); + + hifc_get_fabric_login_params(hba, login_coparms); + + if ((hba->active_topo == UNF_ACT_TOP_P2P_FABRIC) || + (hba->active_topo == UNF_ACT_TOP_PUBLIC_LOOP)) { + if (hba->work_mode == HIFC_SMARTIO_WORK_MODE_FC) + ret = hifc_config_login_api(hba, login_coparms); + } + + return ret; +} + +unsigned int hifc_update_port_param(void *v_hba, void *v_para_in) +{ + unsigned int ret = RETURN_OK; + struct hifc_hba_s *hba = v_hba; + struct unf_port_login_parms_s *login_coparms = + (struct unf_port_login_parms_s *)v_para_in; + + UNF_CHECK_VALID(0x4923, UNF_B_TRUE, hba, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x4924, UNF_B_TRUE, v_para_in, return UNF_RETURN_ERROR); + + if ((hba->active_topo == UNF_ACT_TOP_PRIVATE_LOOP) || + (hba->active_topo == UNF_ACT_TOP_P2P_DIRECT)) { + hifc_get_port_login_params(hba, login_coparms); + ret = hifc_config_login_api(hba, login_coparms); + } + + hifc_save_login_para_in_sq_info(hba, login_coparms); + + return ret; +} + +unsigned int hifc_clear_port_error_code(void *v_hba, void *v_err_code) +{ + return RETURN_OK; +} + +unsigned int hifc_get_and_clear_port_error_code(void *v_hba, void *v_err_code) +{ + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba; + struct hifc_inmbox_get_err_code_s get_err_code; + union hifc_outmbox_generic_u *err_code_sts = NULL; + struct unf_err_code_s *unf_err_code = + (struct unf_err_code_s *)v_err_code; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, unf_err_code, return UNF_RETURN_ERROR); + + memset(&get_err_code, 0, sizeof(get_err_code)); + + err_code_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!err_code_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(err_code_sts, 0, sizeof(union hifc_outmbox_generic_u)); + + get_err_code.header.cmnd_type = HIFC_MBOX_GET_ERR_CODE; + get_err_code.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_get_err_code_s)); + + if (hifc_mb_send_and_wait_mbox(hba, &get_err_code, sizeof(get_err_code), + err_code_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "Port(0x%x) HIFC can't send and wait mailbox, command type: 0x%x.", + hba->port_cfg.port_id, + get_err_code.header.cmnd_type); + + goto exit; + } + + if (err_code_sts->get_err_code_sts.status != STATUS_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "Port(0x%x) Receive mailbox type(0x%x) status incorrect, status: 0x%x.", + hba->port_cfg.port_id, + err_code_sts->get_err_code_sts.header.cmnd_type, + err_code_sts->get_err_code_sts.status); + + goto exit; + } + + unf_err_code->link_fail_count = + err_code_sts->get_err_code_sts.err_code[0]; + unf_err_code->loss_of_sync_count = + err_code_sts->get_err_code_sts.err_code[1]; + 
unf_err_code->loss_of_signal_count = + err_code_sts->get_err_code_sts.err_code[2]; + unf_err_code->proto_error_count = + err_code_sts->get_err_code_sts.err_code[3]; + unf_err_code->bad_rx_char_count = + err_code_sts->get_err_code_sts.err_code[4]; + unf_err_code->bad_crc_count = + err_code_sts->get_err_code_sts.err_code[5]; + unf_err_code->rx_eo_fa_count = + err_code_sts->get_err_code_sts.err_code[6]; + unf_err_code->dis_frame_count = + err_code_sts->get_err_code_sts.err_code[7]; + + ret = RETURN_OK; +exit: + kfree(err_code_sts); + return ret; +} + +unsigned int hifc_get_work_bale_bbcredit(void *v_hba, void *v_bbcredit) +{ + unsigned int *bb_credit = (unsigned int *)v_bbcredit; + struct hifc_hba_s *hba = v_hba; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_bbcredit, return UNF_RETURN_ERROR); + + if (hba->active_port_speed == UNF_PORT_SPEED_32_G) + *bb_credit = HIFC_LOWLEVEL_DEFAULT_32G_BB_CREDIT; + else if (hba->active_port_speed == UNF_PORT_SPEED_16_G) + *bb_credit = HIFC_LOWLEVEL_DEFAULT_16G_BB_CREDIT; + else + *bb_credit = HIFC_LOWLEVEL_DEFAULT_842G_BB_CREDIT; + + return RETURN_OK; +} + +unsigned int hifc_get_work_bale_bbscn(void *v_hba, void *v_bbscn) +{ + unsigned int *bbscn = (unsigned int *)v_bbscn; + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_bbscn, return UNF_RETURN_ERROR); + + *bbscn = hba->port_bbscn_cfg; + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_INFO, "Return BBSCN(0x%x) to CM", + *bbscn); + + return RETURN_OK; +} + +unsigned int hifc_get_software_version(void *v_hba, void *v_version) +{ + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba; + struct hifc_inmbox_get_fw_version_s fw_ver; + union hifc_outmbox_generic_u *fw_ver_sts = NULL; + unsigned char *ver = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, v_version, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + + memset(&fw_ver, 0, sizeof(fw_ver)); + fw_ver_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), GFP_ATOMIC); + if (!fw_ver_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(fw_ver_sts, 0, sizeof(union hifc_outmbox_generic_u)); + ver = (unsigned char *)&fw_ver_sts->get_fw_ver_sts; + + fw_ver.header.cmnd_type = HIFC_MBOX_GET_FW_VERSION; + fw_ver.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_get_fw_version_s)); + + if (hifc_mb_send_and_wait_mbox(hba, &fw_ver, sizeof(fw_ver), + fw_ver_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) can't send and wait mailbox, command type: 0x%x.", + hba->port_cfg.port_id, + fw_ver.header.cmnd_type); + + goto exit; + } + + if (fw_ver_sts->get_fw_ver_sts.header.cmnd_type != + HIFC_MBOX_GET_FW_VERSION_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "Port(0x%x) recv mailbox type(0x%x) incorrect.", + hba->port_cfg.port_id, + fw_ver_sts->get_fw_ver_sts.header.cmnd_type); + + goto exit; + } + + if (fw_ver_sts->get_fw_ver_sts.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "Port(0x%x) Receive mailbox type(0x%x) status(0x%x) incorrect.", + hba->port_cfg.port_id, + fw_ver_sts->get_fw_ver_sts.header.cmnd_type, + fw_ver_sts->get_fw_ver_sts.status); + + goto exit; + } + + memcpy(v_version, ver + HIFC_VER_ADDR_OFFSET, + sizeof(struct 
hifc_outmbox_get_fw_version_sts_s) - + HIFC_VER_ADDR_OFFSET); + + ret = RETURN_OK; +exit: + kfree(fw_ver_sts); + return ret; +} + +unsigned int hifc_get_firmware_version(void *v_fc_port, void *v_version) +{ + struct hifc_hba_s *fc_port = (struct hifc_hba_s *)v_fc_port; + struct unf_version_str_s *version = + (struct unf_version_str_s *)v_version; + char *fw_ver = NULL; + + HIFC_CHECK(INVALID_VALUE32, version, return UNF_RETURN_ERROR); + fw_ver = version->buf; + HIFC_CHECK(INVALID_VALUE32, fw_ver, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, fc_port, return UNF_RETURN_ERROR); + + fw_ver[UNF_FW_VERSION_LEN - 1] = 0; + + return RETURN_OK; +} + +unsigned int hifc_get_loop_map(void *v_hba, void *v_buf) +{ + unsigned long flags = 0; + struct unf_buf_s *buf = (struct unf_buf_s *)v_buf; + struct hifc_hba_s *hba = v_hba; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, buf, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, buf->cbuf, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, buf->buf_len, return UNF_RETURN_ERROR); + + if (buf->buf_len > UNF_LOOPMAP_COUNT) + return UNF_RETURN_ERROR; + + spin_lock_irqsave(&hba->hba_lock, flags); + if (hba->loop_map_valid != LOOP_MAP_VALID) { + spin_unlock_irqrestore(&hba->hba_lock, flags); + return UNF_RETURN_ERROR; + } + memcpy(buf->cbuf, hba->loop_map, buf->buf_len); /* do memcpy */ + spin_unlock_irqrestore(&hba->hba_lock, flags); + + return RETURN_OK; +} + +unsigned int hifc_get_speed_cfg(void *v_hba, void *v_speed_cfg) +{ + struct hifc_hba_s *hba = v_hba; + unsigned int *speed_cfg = v_speed_cfg; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_speed_cfg, return UNF_RETURN_ERROR); + + *speed_cfg = hba->port_speed_cfg; + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "Get config link rate: 0x%x.", + *speed_cfg); + + return RETURN_OK; +} + +unsigned int hifc_get_speed_act(void *v_hba, void *v_speed_act) +{ + struct hifc_hba_s *hba = v_hba; + unsigned int *speed_act = v_speed_act; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_speed_act, return UNF_RETURN_ERROR); + + *speed_act = hba->active_port_speed; + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Get config link rate: 0x%x.", + *speed_act); + return RETURN_OK; +} + +unsigned int hifc_get_port_fec(void *v_hba, void *v_para_out) +{ + struct hifc_hba_s *hba = v_hba; + int *fec = v_para_out; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, fec, return UNF_RETURN_ERROR); + + *fec = (hba->fec_status) ? 
UNF_TRUE : UNF_FALSE; + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Get Port fec: 0x%x.", + (hba->fec_status)); + return RETURN_OK; +} + +unsigned int hifc_save_hba_info(void *v_hba, void *v_para_in) +{ + struct hifc_inmbox_save_hba_info_s *hba_info = NULL; + struct hifc_outmbox_save_hba_info_sts_s *hba_info_sts = NULL; + void *hba_info_addr = v_para_in; + struct hifc_hba_s *hba = v_hba; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_para_in, return UNF_RETURN_ERROR); + + hba_info = vmalloc(sizeof(struct hifc_inmbox_save_hba_info_s)); + + if (!hba_info) + return UNF_RETURN_ERROR; + + hba_info_sts = vmalloc(sizeof(struct hifc_outmbox_save_hba_info_sts_s)); + + if (!hba_info_sts) { + vfree(hba_info); + return UNF_RETURN_ERROR; + } + + memset(hba_info, 0, sizeof(struct hifc_inmbox_save_hba_info_s)); + memset(hba_info_sts, 0, + sizeof(struct hifc_outmbox_save_hba_info_sts_s)); + + hba_info->header.cmnd_type = HIFC_MBOX_SAVE_HBA_INFO; + hba_info->header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_save_hba_info_s)); + + /* fill mailbox payload */ + memcpy(&hba_info->hba_save_info[0], hba_info_addr, SAVE_PORT_INFO_LEN); + + /* send & wait mailbox */ + if (hifc_mb_send_and_wait_mbox( + hba, hba_info, + sizeof(*hba_info), + (union hifc_outmbox_generic_u *)hba_info_sts) + != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[warn]Port(0x%x) HIFC can't send and wait mailbox, command type(0x%x)", + hba->port_cfg.port_id, + hba_info->header.cmnd_type); + + vfree(hba_info); + vfree(hba_info_sts); + + return UNF_RETURN_ERROR; + } + + /* check mailbox rsp status */ + if (hba_info_sts->status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[err]Port(0x%x) receive mailbox type(0x%x) status(0x%x) error", + hba->port_cfg.port_id, + hba_info_sts->header.cmnd_type, + hba_info_sts->status); + + vfree(hba_info); + vfree(hba_info_sts); + + return UNF_RETURN_ERROR; + } + + /* check mailbox rsp type */ + if (hba_info_sts->header.cmnd_type != HIFC_MBOX_SAVE_HBA_INFO_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[err]Port(0x%x) receive mailbox type(0x%x) error", + hba->port_cfg.port_id, + hba_info_sts->header.cmnd_type); + + vfree(hba_info); + vfree(hba_info_sts); + + return UNF_RETURN_ERROR; + } + + memcpy(hba_info_addr, &hba_info_sts->save_hba_info[0], + SAVE_PORT_INFO_LEN - 8); + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "[event]Port(0x%x) save hba info succeed", + hba->port_cfg.port_id); + + vfree(hba_info); + vfree(hba_info_sts); + + return RETURN_OK; +} + +unsigned int hifc_mbox_reset_chip(struct hifc_hba_s *v_hba, + unsigned char v_sub_type) +{ + struct hifc_inmbox_port_reset_s port_reset; + union hifc_outmbox_generic_u *port_reset_sts = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + + memset(&port_reset, 0, sizeof(port_reset)); + + port_reset_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!port_reset_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(port_reset_sts, 0, sizeof(union hifc_outmbox_generic_u)); + port_reset.header.cmnd_type = HIFC_MBOX_PORT_RESET; + port_reset.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_port_reset_s)); + port_reset.op_code = v_sub_type; + + if (hifc_mb_send_and_wait_mbox(v_hba, &port_reset, 
sizeof(port_reset), + port_reset_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[warn]Port(0x%x) can't send and wait mailbox with command type(0x%x)", + v_hba->port_cfg.port_id, + port_reset.header.cmnd_type); + + goto exit; + } + + if (port_reset_sts->port_reset_sts.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[warn]Port(0x%x) receive mailbox type(0x%x) status(0x%x) incorrect", + v_hba->port_cfg.port_id, + port_reset_sts->port_reset_sts.header.cmnd_type, + port_reset_sts->port_reset_sts.status); + + goto exit; + } + + if (port_reset_sts->port_reset_sts.header.cmnd_type != + HIFC_MBOX_PORT_RESET_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[warn]Port(0x%x) recv mailbox type(0x%x) incorrect", + v_hba->port_cfg.port_id, + port_reset_sts->port_reset_sts.header.cmnd_type); + + goto exit; + } + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "[info]Port(0x%x) reset chip mailbox success", + v_hba->port_cfg.port_id); + + ret = RETURN_OK; +exit: + kfree(port_reset_sts); + return ret; +} + +unsigned int hifc_clear_sq_wqe_done(struct hifc_hba_s *v_hba) +{ + int async_ret = RETURN_OK; + struct hifc_inmbx_clear_node_s clear_done; + + clear_done.header.cmnd_type = HIFC_MBOX_BUFFER_CLEAR_DONE; + clear_done.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbx_clear_node_s)); + clear_done.header.port_id = v_hba->port_index; + + async_ret = hifc_msg_to_mgmt_async(v_hba->hw_dev_handle, + HIFC_MOD_FC, + HIFC_MBOX_BUFFER_CLEAR_DONE, + &clear_done, sizeof(clear_done)); + + if (async_ret != 0) { + HIFC_MAILBOX_STAT(v_hba, HIFC_SEND_CLEAR_DONE_FAIL); + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]HIFC Port(0x%x) can't send clear done cmd to up, ret:%d", + v_hba->port_cfg.port_id, async_ret); + + return UNF_RETURN_ERROR; + } + + HIFC_MAILBOX_STAT(v_hba, HIFC_SEND_CLEAR_DONE); + v_hba->q_set_stage = HIFC_QUEUE_SET_STAGE_FLUSHDONE; + v_hba->next_clearing_sq = 0; + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_EVENT, UNF_KEVENT, + "[info]Port(0x%x) clear done msg(0x%x) sent to up succeed with stage(0x%x)", + v_hba->port_cfg.port_id, + clear_done.header.cmnd_type, v_hba->q_set_stage); + + return RETURN_OK; +} + +unsigned int hifc_mbx_get_fw_clear_stat(struct hifc_hba_s *v_hba, + unsigned int *v_clear_state) +{ + struct hifc_inmbox_get_clear_state_s clr_state; + union hifc_outmbox_generic_u *port_clr_state_sts = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_clear_state, return UNF_RETURN_ERROR); + + memset(&clr_state, 0, sizeof(clr_state)); + + port_clr_state_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!port_clr_state_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(port_clr_state_sts, 0, sizeof(union hifc_outmbox_generic_u)); + + clr_state.header.cmnd_type = HIFC_MBOX_GET_CLEAR_STATE; + clr_state.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_get_clear_state_s)); + + if (hifc_mb_send_and_wait_mbox(v_hba, &clr_state, sizeof(clr_state), + port_clr_state_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "hifc can't send and wait mailbox, command type: 0x%x", + clr_state.header.cmnd_type); + + goto exit; + } + + if (port_clr_state_sts->get_clr_state_sts.status != RETURN_OK) { + HIFC_TRACE( + 
UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "Port(0x%x) Receive mailbox type(0x%x) status incorrect. Status: 0x%x, state 0x%x.", + v_hba->port_cfg.port_id, + port_clr_state_sts->get_clr_state_sts.header.cmnd_type, + port_clr_state_sts->get_clr_state_sts.status, + port_clr_state_sts->get_clr_state_sts.state); + + goto exit; + } + + if (port_clr_state_sts->get_clr_state_sts.header.cmnd_type != + HIFC_MBOX_GET_CLEAR_STATE_STS) { + HIFC_TRACE( + UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "Port(0x%x) recv mailbox type(0x%x) incorrect.", + v_hba->port_cfg.port_id, + port_clr_state_sts->get_clr_state_sts.header.cmnd_type); + + goto exit; + } + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_EVENT, UNF_MAJOR, + "Port(0x%x) get port clear state 0x%x.", + v_hba->port_cfg.port_id, + port_clr_state_sts->get_clr_state_sts.state); + + *v_clear_state = port_clr_state_sts->get_clr_state_sts.state; + + ret = RETURN_OK; +exit: + kfree(port_clr_state_sts); + return ret; +} + +unsigned int hifc_mbx_set_fec(struct hifc_hba_s *v_hba, + unsigned int v_fec_opcode) +{ + struct hifc_inmbox_config_fec_s cfg_fec; + union hifc_outmbox_generic_u *port_fec_state_sts = NULL; + unsigned char op_code = 0; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + + memset(&cfg_fec, 0, sizeof(cfg_fec)); + + port_fec_state_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!port_fec_state_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(port_fec_state_sts, 0, sizeof(union hifc_outmbox_generic_u)); + + op_code = (unsigned char)v_fec_opcode; + + cfg_fec.header.cmnd_type = HIFC_MBOX_CONFIG_FEC; + cfg_fec.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(HIFC_MBOX_CONFIG_FEC)); + cfg_fec.fec_op_code = op_code; + + if (hifc_mb_send_and_wait_mbox(v_hba, &cfg_fec, sizeof(cfg_fec), + port_fec_state_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) hifc can't send and wait mailbox, command type: 0x%x", + v_hba->port_cfg.port_id, cfg_fec.header.cmnd_type); + + goto exit; + } + + if (port_fec_state_sts->config_fec_sts.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "Port(0x%x) Receive mailbox type(0x%x) status incorrect. 
Status: 0x%x.", + v_hba->port_cfg.port_id, + port_fec_state_sts->config_fec_sts.header.cmnd_type, + port_fec_state_sts->config_fec_sts.status); + + goto exit; + } + + if (port_fec_state_sts->config_fec_sts.header.cmnd_type != + HIFC_MBOX_CONFIG_FEC_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "Port(0x%x) recv mailbox type(0x%x) incorrect.", + v_hba->port_cfg.port_id, + port_fec_state_sts->config_fec_sts.header.cmnd_type); + + goto exit; + } + + v_hba->fec_status = v_fec_opcode; + + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EVENT, UNF_MAJOR, + "Port(0x%x) set FEC Status is %u.", + v_hba->port_cfg.port_id, op_code); + + ret = RETURN_OK; +exit: + kfree(port_fec_state_sts); + return ret; +} + +unsigned int hifc_notify_up_config_timer(struct hifc_hba_s *v_hba, int op_code, + unsigned int user_data) +{ + struct hifc_inmbox_config_timer_s time_cfg; + union hifc_outmbox_generic_u *time_cfg_sts = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + + memset(&time_cfg, 0, sizeof(time_cfg)); + + time_cfg_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + if (!time_cfg_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, "malloc outmbox memory failed"); + return UNF_RETURN_ERROR; + } + memset(time_cfg_sts, 0, sizeof(union hifc_outmbox_generic_u)); + time_cfg.header.cmnd_type = HIFC_MBOX_CONFIG_TIMER; + time_cfg.header.length = + HIFC_BYTES_TO_DW_NUM(sizeof(struct hifc_inmbox_config_timer_s)); + time_cfg.op_code = (unsigned short)op_code; + time_cfg.fun_id = hifc_global_func_id(v_hba->hw_dev_handle); + time_cfg.user_data = user_data; + + if (hifc_mb_send_and_wait_mbox(v_hba, &time_cfg, sizeof(time_cfg), + time_cfg_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[warn]Port(0x%x) hifc can't send and wait mailbox with command type(0x%x)", + v_hba->port_cfg.port_id, time_cfg.header.cmnd_type); + + goto exit; + } + + if (time_cfg_sts->timer_config_sts.header.cmnd_type != + HIFC_MBOX_CONFIG_TIMER_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[warn]Port(0x%x) recv mailbox type(0x%x) incorrect", + v_hba->port_cfg.port_id, + time_cfg_sts->timer_config_sts.header.cmnd_type); + + goto exit; + } + + if (time_cfg_sts->timer_config_sts.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_EQUIP_ATT, UNF_ERR, + "[warn]Port(0x%x) Receive mailbox type(0x%x) status(0x%x) incorrect", + v_hba->port_cfg.port_id, + time_cfg_sts->timer_config_sts.header.cmnd_type, + time_cfg_sts->timer_config_sts.status); + + goto exit; + } + + HIFC_TRACE(UNF_EVTLOG_LINK_INFO, UNF_LOG_EVENT, UNF_MAJOR, + "[info]Port(0x%x) notify uP to %s timer success", + v_hba->port_cfg.port_id, op_code ? 
"open" : "close"); + + ret = RETURN_OK; +exit: + kfree(time_cfg_sts); + return ret; +} + +unsigned int hifc_get_flash_data(void *v_hba, void *v_flash_data) +{ + struct hifc_hba_s *hba = NULL; + struct unf_mbox_flash_data_mgmt_s *flash_data_mgmt = NULL; + union hifc_outmbox_generic_u *flash_data_sts = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_flash_data, return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)v_hba; + + flash_data_mgmt = kmalloc(sizeof(struct unf_mbox_flash_data_mgmt_s), + GFP_ATOMIC); + + if (!flash_data_mgmt) { + HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_REG_ATT, UNF_KEVENT, + "can't malloc buff for set flashData."); + return ret; + } + flash_data_sts = kmalloc(sizeof(struct unf_flash_data_mgmt_sts_s), + GFP_ATOMIC); + + if (!flash_data_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_WARN, UNF_LOG_REG_ATT, UNF_KEVENT, + "can't malloc buff for set flashData sts."); + kfree(flash_data_mgmt); + return ret; + } + memset(flash_data_mgmt, 0, sizeof(struct unf_mbox_flash_data_mgmt_s)); + memset(flash_data_sts, 0, sizeof(struct unf_flash_data_mgmt_sts_s)); + flash_data_mgmt->mbox_head.cmnd_type = HIFC_MBOX_FLASH_DATA_MGMT; + flash_data_mgmt->mbox_head.length = 1; /* not used */ + flash_data_mgmt->mbox_head.op_code = 0; /* read config */ + + if (hifc_mb_send_and_wait_mbox( + hba, flash_data_mgmt, + sizeof(struct unf_mbox_flash_data_mgmt_s), + flash_data_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "hifc can't send and wait mailbox, command type: 0x%x.", + flash_data_mgmt->mbox_head.cmnd_type); + + goto exit; + } + + if (flash_data_sts->flash_data_sts.mbox_head.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) mailbox status incorrect status(0x%x) .", + hba->port_cfg.port_id, + flash_data_sts->flash_data_sts.mbox_head.status); + + goto exit; + } + + if (flash_data_sts->flash_data_sts.mbox_head.cmnd_type != + HIFC_MBOX_FLASH_DATA_MGMT_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) receive mailbox type incorrect type: 0x%x.", + hba->port_cfg.port_id, + flash_data_sts->flash_data_sts.mbox_head.cmnd_type); + + goto exit; + } + + memcpy((unsigned char *)v_flash_data, + (unsigned char *)&flash_data_sts->flash_data_sts.flash_data, + sizeof(struct unf_flash_data_s)); + ret = RETURN_OK; +exit: + kfree(flash_data_mgmt); + kfree(flash_data_sts); + return ret; +} + +unsigned int hifc_set_flash_data(void *v_hba, void *v_flash_data) +{ + struct hifc_hba_s *hba = NULL; + struct unf_mbox_flash_data_mgmt_s *flash_data_mgmt = NULL; + union hifc_outmbox_generic_u *flash_data_sts = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_flash_data, return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)v_hba; + flash_data_mgmt = kmalloc(sizeof(struct unf_mbox_flash_data_mgmt_s), + GFP_ATOMIC); + + if (!flash_data_mgmt) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_KEVENT, + "can't malloc buff for set flashData."); + return ret; + } + flash_data_sts = kmalloc(sizeof(union hifc_outmbox_generic_u), + GFP_ATOMIC); + + if (!flash_data_sts) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_KEVENT, + "can't malloc buff for set flashData sts."); + kfree(flash_data_mgmt); + return ret; + } + memset(flash_data_sts, 0, sizeof(union hifc_outmbox_generic_u)); + memset(flash_data_mgmt, 0, sizeof(struct 
unf_mbox_flash_data_mgmt_s)); + flash_data_mgmt->mbox_head.cmnd_type = HIFC_MBOX_FLASH_DATA_MGMT; + flash_data_mgmt->mbox_head.length = 1; /* not used */ + flash_data_mgmt->mbox_head.op_code = 2; /* flash config */ + + if (hifc_mb_send_and_wait_mbox( + hba, flash_data_mgmt, + sizeof(struct unf_mbox_flash_data_mgmt_s), + flash_data_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_KEVENT, + "hifc can't send and wait mailbox, command type: 0x%x.", + flash_data_sts->flash_data_sts.mbox_head.cmnd_type); + + goto END; + } + + if (flash_data_sts->flash_data_sts.mbox_head.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_KEVENT, + "Port(0x%x) mailbox status incorrect status(0x%x) .", + hba->port_cfg.port_id, + flash_data_sts->flash_data_sts.mbox_head.status); + + goto END; + } + + if (flash_data_sts->flash_data_sts.mbox_head.cmnd_type != + HIFC_MBOX_FLASH_DATA_MGMT_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_KEVENT, + "Port(0x%x) receive mailbox type incorrect type: 0x%x.", + hba->port_cfg.port_id, + flash_data_sts->flash_data_sts.mbox_head.cmnd_type); + + goto END; + } + flash_data_mgmt->mbox_head.cmnd_type = HIFC_MBOX_FLASH_DATA_MGMT; + flash_data_mgmt->mbox_head.length = 1; /* not used */ + flash_data_mgmt->mbox_head.op_code = 1; /* write config */ + memcpy(&flash_data_mgmt->flash_data, + (unsigned char *)v_flash_data, sizeof(struct unf_flash_data_s)); + + if (hifc_mb_send_and_wait_mbox( + hba, flash_data_mgmt, + sizeof(struct unf_mbox_flash_data_mgmt_s), + flash_data_sts) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "hifc can't send and wait mailbox, command type: 0x%x.", + flash_data_sts->flash_data_sts.mbox_head.cmnd_type); + + goto END; + } + + if (flash_data_sts->flash_data_sts.mbox_head.status != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_KEVENT, + "Port(0x%x) mailbox status incorrect status(0x%x) .", + hba->port_cfg.port_id, + flash_data_sts->flash_data_sts.mbox_head.status); + + goto END; + } + + if (flash_data_sts->flash_data_sts.mbox_head.cmnd_type != + HIFC_MBOX_FLASH_DATA_MGMT_STS) { + HIFC_TRACE(UNF_EVTLOG_LINK_ERR, UNF_LOG_REG_ATT, UNF_KEVENT, + "Port(0x%x) receive mailbox type incorrect type: 0x%x.", + hba->port_cfg.port_id, + flash_data_sts->flash_data_sts.mbox_head.cmnd_type); + + goto END; + } + ret = RETURN_OK; +END: + kfree(flash_data_mgmt); + kfree(flash_data_sts); + return ret; +} diff --git a/drivers/scsi/huawei/hifc/hifc_chipitf.h b/drivers/scsi/huawei/hifc/hifc_chipitf.h new file mode 100644 index 000000000000..8b4915d2a990 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_chipitf.h @@ -0,0 +1,643 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef __HIFC_CHIPITF_H__ +#define __HIFC_CHIPITF_H__ + +#include "unf_log.h" +#include "hifc_utils.h" +#include "hifc_module.h" +#include "hifc_service.h" + +/* CONF_API_CMND */ +#define HIFC_MBOX_CONFIG_API 0x00 +#define HIFC_MBOX_CONFIG_API_STS 0xA0 + +/* GET_CHIP_INFO_API_CMD */ +#define HIFC_MBOX_GET_CHIP_INFO 0x01 +#define HIFC_MBOX_GET_CHIP_INFO_STS 0xA1 + +/* PORT_RESET */ +#define HIFC_MBOX_PORT_RESET 0x02 +#define HIFC_MBOX_PORT_RESET_STS 0xA2 + +/* SFP_SWITCH_API_CMND */ +#define HIFC_MBOX_PORT_SWITCH 0x03 +#define HIFC_MBOX_PORT_SWITCH_STS 0xA3 + +/* GET_SFP_INFO */ +#define HIFC_MBOX_GET_SFP_INFO 0x04 +#define HIFC_MBOX_GET_SFP_INFO_STS 0xA4 + +/* CONF_AF_LOGIN_API_CMND */ +#define 
HIFC_MBOX_CONFIG_LOGIN_API 0x06 +#define HIFC_MBOX_CONFIG_LOGIN_API_STS 0xA6 + +/* BUFFER_CLEAR_DONE_CMND */ +#define HIFC_MBOX_BUFFER_CLEAR_DONE 0x07 +#define HIFC_MBOX_BUFFER_CLEAR_DONE_STS 0xA7 + +#define HIFC_MBOX_GET_ERR_CODE 0x08 +#define HIFC_MBOX_GET_ERR_CODE_STS 0xA8 + +#define HIFC_MBOX_GET_UP_STATE 0x09 +#define HIFC_MBOX_GET_UP_STATE_STS 0xA9 + +/* LOOPBACK MODE */ +#define HIFC_MBOX_LOOPBACK_MODE 0x0A +#define HIFC_MBOX_LOOPBACK_MODE_STS 0xAA + +/* REG RW MODE */ +#define HIFC_MBOX_REG_RW_MODE 0x0B +#define HIFC_MBOX_REG_RW_MODE_STS 0xAB + +/* GET CLEAR DONE STATE */ +#define HIFC_MBOX_GET_CLEAR_STATE 0x0E +#define HIFC_MBOX_GET_CLEAR_STATE_STS 0xAE + +/* GET UP & UCODE VER */ +#define HIFC_MBOX_GET_FW_VERSION 0x0F +#define HIFC_MBOX_GET_FW_VERSION_STS 0xAF + +/* CONFIG TIMER */ +#define HIFC_MBOX_CONFIG_TIMER 0x10 +#define HIFC_MBOX_CONFIG_TIMER_STS 0xB0 + +/* CONFIG SRQC */ +#define HIFC_MBOX_CONFIG_SRQC 0x11 +#define HIFC_MBOX_CONFIG_SRQC_STS 0xB1 + +/* Led Test */ +#define HIFC_MBOX_LED_TEST 0x12 +#define HIFC_MBOX_LED_TEST_STS 0xB2 + +/* set esch */ +#define HIFC_MBOX_SET_ESCH 0x13 +#define HIFC_MBOX_SET_ESCH_STS 0xB3 + +/* set get tx serdes */ +#define HIFC_MBOX_SET_GET_SERDES_TX 0x14 +#define HIFC_MBOX_SET_GET_SERDES_TX_STS 0xB4 + +/* get rx serdes */ +#define HIFC_MBOX_GET_SERDES_RX 0x15 +#define HIFC_MBOX_GET_SERDES_RX_STS 0xB5 + +/* i2c read write */ +#define HIFC_MBOX_I2C_WR_RD 0x16 +#define HIFC_MBOX_I2C_WR_RD_STS 0xB6 + +/* Set FEC Enable */ +#define HIFC_MBOX_CONFIG_FEC 0x17 +#define HIFC_MBOX_CONFIG_FEC_STS 0xB7 + +/* GET UCODE STATS CMD */ +#define HIFC_MBOX_GET_UCODE_STAT 0x18 +#define HIFC_MBOX_GET_UCODE_STAT_STS 0xB8 + +/* gpio read write */ +#define HIFC_MBOX_GPIO_WR_RD 0x19 +#define HIFC_MBOX_GPIO_WR_RD_STS 0xB9 + +/* GET PORT INFO CMD */ +#define HIFC_MBOX_GET_PORT_INFO 0x20 +#define HIFC_MBOX_GET_PORT_INFO_STS 0xC0 + +/* save hba info CMD */ +#define HIFC_MBOX_SAVE_HBA_INFO 0x24 +#define HIFC_MBOX_SAVE_HBA_INFO_STS 0xc4 + +#define HIFC_MBOX_FLASH_DATA_MGMT 0x25 +#define HIFC_MBOX_FLASH_DATA_MGMT_STS 0xc5 + +/* FCOE: DRV->UP */ +#define HIFC_MBOX_SEND_ELS_CMD 0x2A +#define HIFC_MBOX_SEND_VPORT_INFO 0x2B + +/* FC: UP->DRV */ +#define HIFC_MBOX_RECV_FC_LINKUP 0x40 +#define HIFC_MBOX_RECV_FC_LINKDOWN 0x41 +#define HIFC_MBOX_RECV_FC_DELCMD 0x42 +#define HIFC_MBOX_RECV_FC_ERROR 0x43 + +#define LOOP_MAP_VALID 1 +#define LOOP_MAP_INVALID 0 + +#define HIFC_MBOX_SIZE 1024 +#define HIFC_MBOX_HEADER_SIZE 4 + +#define ATUOSPEED 1 +#define FIXEDSPEED 0 +#define UNDEFINEOPCODE 0 + +#define VALUEMASK_L 0x00000000FFFFFFFF +#define VALUEMASK_H 0xFFFFFFFF00000000 + +#define STATUS_OK 0 +#define STATUS_FAIL 1 + +enum hifc_drv_2_up_unblock_msg_cmd_code_e { + HIFC_SEND_ELS_CMD, + HIFC_SEND_ELS_CMD_FAIL, + HIFC_RCV_ELS_CMD_RSP, + HIFC_SEND_CONFIG_LOGINAPI, + HIFC_SEND_CONFIG_LOGINAPI_FAIL, + HIFC_RCV_CONFIG_LOGIN_API_RSP, + HIFC_SEND_CLEAR_DONE, + HIFC_SEND_CLEAR_DONE_FAIL, + HIFC_RCV_CLEAR_DONE_RSP, + HIFC_SEND_VPORT_INFO_DONE, + HIFC_SEND_VPORT_INFO_FAIL, + HIFC_SEND_VPORT_INFO_RSP, + HIFC_MBOX_CMD_BUTT + +}; + +/* up to driver handle templete */ +struct hifc_up_2_drv_msg_handle_s { + unsigned char cmd; + unsigned int (*pfn_hifc_msg_up2drv_handler)(struct hifc_hba_s *v_hba, + void *v_buf_in); +}; + +/* Mbox Common Header */ +struct hifc_mbox_header_s { + unsigned char cmnd_type; + unsigned char length; + unsigned char port_id; + unsigned char reserved; + +}; + +/* open or close the sfp */ +struct hifc_inbox_port_switch_s { + struct hifc_mbox_header_s header; + + unsigned 
char op_code; + unsigned char port_type; + unsigned short reserved; + + unsigned char host_id; + unsigned char pf_id; + unsigned char fcoe_mode; + unsigned char reserved2; + + unsigned short conf_vlan; + unsigned short reserved3; + + unsigned long long sys_port_wwn; + unsigned long long sys_node_name; +}; + +struct hifc_outbox_port_switch_sts_s { + struct hifc_mbox_header_s header; + + unsigned short reserved; + unsigned char reserved2; + unsigned char status; +}; + +/* config API */ +struct hifc_inbox_config_api_s { + struct hifc_mbox_header_s header; + + unsigned int op_code : 8; + unsigned int reserved1 : 24; + + unsigned char topy_mode; + unsigned char sfp_speed; + unsigned char max_speed; + unsigned char hard_alpa; + + unsigned char port_name[UNF_WWN_LEN]; + + unsigned int slave : 1; + unsigned int auto_sneg : 1; + unsigned int reserved2 : 30; + + unsigned int rx_bbcredit_32g : 16; /* 160 */ + unsigned int rx_bbcredit_16g : 16; /* 80 */ + unsigned int rx_bbcredit_842g : 16; /* 50 */ + unsigned int rdy_cnt_bf_fst_frm : 16; /* 8 */ + + unsigned int esch_value_32g; + unsigned int esch_value_16g; + unsigned int esch_value_8g; + unsigned int esch_value_4g; + unsigned int esch_value_2g; + unsigned int esch_bust_size; +}; + +struct hifc_outbox_config_api_sts_s { + struct hifc_mbox_header_s header; + + unsigned short reserved; + unsigned char reserved2; + unsigned char status; +}; + +/* Get chip info */ +struct hifc_inbox_get_chip_info_s { + struct hifc_mbox_header_s header; + +}; + +struct hifc_outbox_get_chip_info_sts_s { + struct hifc_mbox_header_s header; + + unsigned char status; + unsigned char board_type; + unsigned char rvsd; + unsigned char tape_support : 1; + unsigned char reserved : 7; + + unsigned long long wwpn; + unsigned long long wwnn; + unsigned long long sys_mac; + +}; + +/* Get reg info */ +struct hifc_inmbox_get_reg_info_s { + struct hifc_mbox_header_s header; + unsigned int op_code : 1; + unsigned int reg_len : 8; + unsigned int rsvd : 23; + unsigned int reg_addr; + unsigned int reg_value_l32; + unsigned int reg_value_h32; + unsigned int rvsd[27]; +}; + +/* Get reg info sts */ +struct hifc_outmbox_get_reg_info_sts_s { + struct hifc_mbox_header_s header; + + unsigned short rvsd0; + unsigned char rvsd1; + unsigned char status; + unsigned int reg_value_l32; + unsigned int reg_value_h32; + unsigned int rvsd[28]; +}; + +/* Config login API */ +struct hifc_inmbox_config_login_s { + struct hifc_mbox_header_s header; + + unsigned int op_code : 8; + unsigned int reserved1 : 24; + + unsigned short tx_bb_credit; + unsigned short reserved2; + + unsigned int rtov; + unsigned int etov; + + unsigned int rt_tov_tag : 1; + unsigned int ed_tov_tag : 1; + unsigned int bb_credit : 6; + unsigned int bbscn : 8; + unsigned int lr_flag : 16; +}; + +struct hifc_outmbox_config_login_sts_s { + struct hifc_mbox_header_s header; + + unsigned short reserved; + unsigned char reserved2; + unsigned char status; +}; + +/* port reset */ +#define HIFC_MBOX_SUBTYPE_LIGHT_RESET 0x0 +#define HIFC_MBOX_SUBTYPE_HEAVY_RESET 0x1 + +struct hifc_inmbox_port_reset_s { + struct hifc_mbox_header_s header; + + unsigned int op_code : 8; + unsigned int reserved1 : 24; +}; + +struct hifc_outmbox_port_reset_sts_s { + struct hifc_mbox_header_s header; + + unsigned short reserved; + unsigned char reserved2; + unsigned char status; +}; + +struct hifc_inmbox_get_sfp_info_s { + struct hifc_mbox_header_s header; +}; + +struct hifc_outmbox_get_sfp_info_sts_s { + struct hifc_mbox_header_s header; + + unsigned int rcvd : 8; + 
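/* rcvd(8), length(16) and status(8) pack into a single 32-bit word */ + 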
unsigned int length : 16; + unsigned int status : 8; +}; + +/* get and clear error code */ +struct hifc_inmbox_get_err_code_s { + struct hifc_mbox_header_s header; +}; + +struct hifc_outmbox_get_err_code_sts_s { + struct hifc_mbox_header_s header; + + unsigned short rsvd; + unsigned char rsvd2; + unsigned char status; + + unsigned int err_code[8]; +}; + +/* uP-->Driver asyn event API */ +struct hifc_link_event_s { + struct hifc_mbox_header_s header; + + unsigned char link_event; + unsigned char reason; + unsigned char speed; + unsigned char top_type; + + unsigned char alpa_value; + unsigned char reserved1; + unsigned short paticpate : 1; + unsigned short acled : 1; + unsigned short yellow_speed_led : 1; + unsigned short green_speed_led : 1; + unsigned short reserved : 12; + + unsigned char loop_map_info[128]; +}; + +enum hifc_up_err_type_e { + HIFC_UP_ERR_DRV_PARA = 0, + HIFC_UP_ERR_SFP = 1, + HIFC_UP_ERR_32G_PUB = 2, + HIFC_UP_ERR_32G_UA = 3, + HIFC_UP_ERR_32G_MAC = 4, + HIFC_UP_ERR_NON32G_DFX = 5, + HIFC_UP_ERR_NON32G_MAC = 6, + HIFC_UP_ERR_BUTT +}; + +enum hifc_up_err_value_e { + /* ERR type 0 */ + HIFC_DRV_2_UP_PARA_ERR = 0, + + /* ERR type 1 */ + HIFC_SFP_SPEED_ERR, + + /* ERR type 2 */ + HIFC_32GPUB_UA_RXESCH_FIFO_OF, + HIFC_32GPUB_UA_RXESCH_FIFO_UCERR, + + /* ERR type 3 */ + HIFC_32G_UA_UATX_LEN_ABN, + HIFC_32G_UA_RXAFIFO_OF, + HIFC_32G_UA_TXAFIFO_OF, + HIFC_32G_UA_RXAFIFO_UCERR, + HIFC_32G_UA_TXAFIFO_UCERR, + + /* ERR type 4 */ + HIFC_32G_MAC_RX_BBC_FATAL, + HIFC_32G_MAC_TX_BBC_FATAL, + HIFC_32G_MAC_TXFIFO_UF, + HIFC_32G_MAC_PCS_TXFIFO_UF, + HIFC_32G_MAC_RXBBC_CRDT_TO, + HIFC_32G_MAC_PCS_RXAFIFO_OF, + HIFC_32G_MAC_PCS_TXFIFO_OF, + HIFC_32G_MAC_FC2P_RXFIFO_OF, + HIFC_32G_MAC_FC2P_TXFIFO_OF, + HIFC_32G_MAC_FC2P_CAFIFO_OF, + HIFC_32G_MAC_PCS_RXRSFECM_UCEER, + HIFC_32G_MAC_PCS_RXAFIFO_UCEER, + HIFC_32G_MAC_PCS_TXFIFO_UCEER, + HIFC_32G_MAC_FC2P_RXFIFO_UCEER, + HIFC_32G_MAC_FC2P_TXFIFO_UCEER, + + /* ERR type 5 */ + HIFC_NON32G_DFX_FC1_DFX_BF_FIFO, + HIFC_NON32G_DFX_FC1_DFX_BP_FIFO, + HIFC_NON32G_DFX_FC1_DFX_RX_AFIFO_ERR, + HIFC_NON32G_DFX_FC1_DFX_TX_AFIFO_ERR, + HIFC_NON32G_DFX_FC1_DFX_DIRQ_RXBUF_FIFO1, + HIFC_NON32G_DFX_FC1_DFX_DIRQ_RXBBC_TO, + HIFC_NON32G_DFX_FC1_DFX_DIRQ_TXDAT_FIFO, + HIFC_NON32G_DFX_FC1_DFX_DIRQ_TXCMD_FIFO, + HIFC_NON32G_DFX_FC1_ERR_R_RDY, + + /* ERR type 6 */ + HIFC_NON32G_MAC_FC1_FAIRNESS_ERROR, + + HIFC_ERR_VALUE_BUTT +}; + +struct hifc_up_error_event_s { + struct hifc_mbox_header_s header; + + unsigned char link_event; + unsigned char error_level; + unsigned char error_type; + unsigned char error_value; +}; + +struct hifc_inmbx_clear_node_s { + struct hifc_mbox_header_s header; +}; + +struct hifc_inmbox_get_clear_state_s { + struct hifc_mbox_header_s header; + unsigned int resvd[31]; +}; + +struct hifc_outmbox_get_clear_state_sts_s { + struct hifc_mbox_header_s header; + unsigned short rsvd; + unsigned char state; /* 1--clear doing. 0---clear done. 
*/ + unsigned char status; /* 0--ok,!0---fail */ + unsigned int resvd[30]; +}; + +#define HIFC_FIP_MODE_VN2VF 0 +#define HIFC_FIP_MODE_VN2VN 1 + +/* get port state */ +struct hifc_inmbox_get_port_info_s { + struct hifc_mbox_header_s header; +}; + +/* save hba info */ +struct hifc_inmbox_save_hba_info_s { + struct hifc_mbox_header_s header; + + unsigned int hba_save_info[254]; + +}; + +struct hifc_outmbox_get_port_info_sts_s { + struct hifc_mbox_header_s header; + + unsigned int status : 8; + unsigned int fec_vis_tts_16g : 8; + unsigned int bbscn : 8; + unsigned int loop_credit : 8; + + unsigned int non_loop_rx_credit : 8; + unsigned int non_loop_tx_credit : 8; + unsigned int sfp_speed : 8; + unsigned int present : 8; + +}; + +struct hifc_outmbox_save_hba_info_sts_s { + struct hifc_mbox_header_s header; + unsigned short rsvd1; + unsigned char rsvd2; + unsigned char status; + unsigned int rsvd3; + unsigned int save_hba_info[252]; +}; + +#define HIFC_VER_ADDR_OFFSET (8) +struct hifc_inmbox_get_fw_version_s { + struct hifc_mbox_header_s header; +}; + +struct hifc_outmbox_get_fw_version_sts_s { + struct hifc_mbox_header_s header; + + unsigned char status; + unsigned char rsv[3]; + + unsigned char ucode_ver[HIFC_VER_LEN]; + unsigned char ucode_compile_time[HIFC_COMPILE_TIME_LEN]; + + unsigned char up_ver[HIFC_VER_LEN]; + unsigned char up_compile_time[HIFC_COMPILE_TIME_LEN]; + + unsigned char boot_ver[HIFC_VER_LEN]; + unsigned char boot_compile_time[HIFC_COMPILE_TIME_LEN]; +}; + +/* Set Fec Enable */ +struct hifc_inmbox_config_fec_s { + struct hifc_mbox_header_s header; + + unsigned char fec_op_code; + unsigned char rsv0; + unsigned short rsv1; +}; + +struct hifc_outmbox_config_fec_sts_s { + struct hifc_mbox_header_s header; + + unsigned short usrsv0; + unsigned char ucrsv1; + unsigned char status; +}; + +struct hifc_inmbox_config_timer_s { + struct hifc_mbox_header_s header; + + unsigned short op_code; + unsigned short fun_id; + unsigned int user_data; +}; + +struct hifc_outmbox_config_timer_sts_s { + struct hifc_mbox_header_s header; + + unsigned char status; + unsigned char rsv[3]; +}; + +union hifc_outmbox_generic_u { + struct { + struct hifc_mbox_header_s header; + unsigned int rsvd[(HIFC_MBOX_SIZE - HIFC_MBOX_HEADER_SIZE) / + sizeof(unsigned int)]; + } generic; + + struct hifc_outbox_port_switch_sts_s port_switch_sts; + struct hifc_outbox_config_api_sts_s config_api_sts; + struct hifc_outbox_get_chip_info_sts_s get_chip_info_sts; + struct hifc_outmbox_get_reg_info_sts_s get_reg_info_sts; + struct hifc_outmbox_config_login_sts_s config_login_sts; + struct hifc_outmbox_port_reset_sts_s port_reset_sts; + struct hifc_outmbox_get_sfp_info_sts_s get_sfp_info_sts; + struct hifc_outmbox_get_err_code_sts_s get_err_code_sts; + struct hifc_outmbox_get_clear_state_sts_s get_clr_state_sts; + struct hifc_outmbox_get_fw_version_sts_s get_fw_ver_sts; + struct hifc_outmbox_config_fec_sts_s config_fec_sts; + struct hifc_outmbox_config_timer_sts_s timer_config_sts; + struct hifc_outmbox_get_port_info_sts_s get_port_info_sts; + struct unf_flash_data_mgmt_sts_s flash_data_sts; +}; + +unsigned int hifc_get_chip_msg(void *v_hba, void *v_mac); +unsigned int hifc_config_port_table(struct hifc_hba_s *v_hba); +unsigned int hifc_port_switch(struct hifc_hba_s *v_hba, int turn_on); +unsigned int hifc_get_speed_act(void *v_hba, void *v_speed_act); +unsigned int hifc_get_speed_cfg(void *v_hba, void *v_speed_cfg); +unsigned int hifc_get_loop_map(void *v_hba, void *v_buf); +unsigned int hifc_get_firmware_version(void 
*v_fc_port, void *v_ver); +unsigned int hifc_get_work_bale_bbcredit(void *v_hba, void *v_bb_credit); +unsigned int hifc_get_work_bale_bbscn(void *v_hba, void *v_bbscn); +unsigned int hifc_get_and_clear_port_error_code(void *v_hba, void *v_err_code); +unsigned int hifc_get_port_current_info(void *v_hba, void *v_port_info); +unsigned int hifc_get_port_fec(void *v_hba, void *v_para_out); +unsigned int hifc_get_software_version(void *v_fc_port, void *v_ver); +unsigned int hifc_get_port_info(void *v_hba); +unsigned int hifc_rw_reg(void *v_hba, void *v_params); +unsigned int hifc_clear_port_error_code(void *v_hba, void *v_err_code); +unsigned int hifc_get_sfp_info(void *v_fc_port, void *v_sfp_info); +unsigned int hifc_get_hardware_version(void *v_fc_port, void *v_ver); +unsigned int hifc_get_lport_led(void *v_hba, void *v_led_state); +unsigned int hifc_get_loop_alpa(void *v_hba, void *v_alpa); +unsigned int hifc_get_topo_act(void *v_hba, void *v_topo_act); +unsigned int hifc_get_topo_cfg(void *v_hba, void *v_topo_cfg); +unsigned int hifc_config_login_api( + struct hifc_hba_s *v_hba, + struct unf_port_login_parms_s *v_login_parms); +unsigned int hifc_mb_send_and_wait_mbox(struct hifc_hba_s *v_hba, + const void *v_in_mbox, + unsigned short in_size, + union hifc_outmbox_generic_u + *v_out_mbox); +void hifc_up_msg_2_driver_proc(void *v_hwdev_handle, + void *v_pri_handle, + unsigned char v_cmd, + void *v_buf_in, + unsigned short v_in_size, + void *v_buf_out, + unsigned short *v_out_size); + +unsigned int hifc_mbox_reset_chip(struct hifc_hba_s *v_hba, + unsigned char v_sub_type); +unsigned int hifc_clear_sq_wqe_done(struct hifc_hba_s *v_hba); +unsigned int hifc_update_fabric_param(void *v_hba, void *v_para_in); +unsigned int hifc_update_port_param(void *v_hba, void *v_para_in); +unsigned int hifc_mbx_get_fw_clear_stat(struct hifc_hba_s *v_hba, + unsigned int *v_clear_state); +unsigned short hifc_get_global_base_qpn(void *v_handle); +unsigned int hifc_mbx_set_fec(struct hifc_hba_s *v_hba, + unsigned int v_fec_opcode); +unsigned int hifc_notify_up_config_timer(struct hifc_hba_s *v_hba, + int v_opcode, + unsigned int v_user_data); +unsigned int hifc_save_hba_info(void *v_hba, void *v_para_in); +unsigned int hifc_get_chip_capability(void *hw_dev_handle, + struct hifc_chip_info_s *v_chip_info); +unsigned int hifc_get_flash_data(void *v_hba, void *v_flash_data); +unsigned int hifc_set_flash_data(void *v_hba, void *v_flash_data); + +#endif diff --git a/drivers/scsi/huawei/hifc/hifc_hba.c b/drivers/scsi/huawei/hifc/hifc_hba.c new file mode 100644 index 000000000000..bab735916b8e --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_hba.c @@ -0,0 +1,1627 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#include "hifc_module.h" +#include "hifc_chipitf.h" +#include "hifc_io.h" +#include "hifc_portmng.h" +#include "hifc_lld.h" +#include "hifc_cqm_object.h" +#include "hifc_cqm_main.h" +#include "hifc_mgmt.h" +#include "hifc_hba.h" + +struct hifc_hba_s *hifc_hba[HIFC_HBA_PORT_MAX_NUM]; +unsigned long probe_bit_map[HIFC_MAX_PROBE_PORT_NUM / HIFC_PORT_NUM_PER_TABLE]; +static unsigned long card_num_bit_map[HIFC_MAX_PROBE_PORT_NUM / + HIFC_PORT_NUM_PER_TABLE]; +static struct hifc_card_num_manage_s card_num_manage[HIFC_MAX_CARD_NUM]; +/* probe global lock */ +spinlock_t probe_spin_lock; +unsigned int max_parent_qpc_num; + +static unsigned int hifc_port_config_set(void *v_hba, + enum unf_port_config_set_op_e op_code, + void 
*v_var_in); +static unsigned int hifc_port_config_get(void *v_hba, + enum unf_port_config_get_op_e op_code, + void *param_out); +static unsigned int hifc_sfp_switch(void *v_hba, void *v_para_in); +static unsigned int hifc_get_hba_pcie_link_state(void *v_hba, + void *v_link_state); + +struct service_register_template_s service_cqm_temp = { + .scq_ctx_size = HIFC_SCQ_CNTX_SIZE, + /* srq, scq context_size configuration */ + .srq_ctx_size = HIFC_SRQ_CNTX_SIZE, + /* the API of asynchronous event from TILE to driver */ + .aeq_callback = hifc_process_aeqe, +}; + +/* default configuration: auto speed, auto topology, INI+TGT */ +static struct unf_cfg_item_s hifc_port_cfg_parm[] = { + { "port_id", 0, 0x110000, 0xffffff}, + /* port mode:INI(0x20), TGT(0x10), BOTH(0x30) */ + { "port_mode", 0, 0x20, 0xff}, + /* port topology, 0x3: loop, 0xc:p2p, 0xf:auto ,0x10:vn2vn */ + { "port_topology", 0, 0xf, 0x20}, + /* alpa address of port */ + { "port_alpa", 0, 0xdead, 0xffff}, + /* queue depth of originator registered to SCSI midlayer */ + { "max_queue_depth", 0, 512, 512}, + { "sest_num", 0, 4096, 4096}, + { "max_login", 0, 2048, 2048}, + /* nodename from 32 bit to 64 bit */ + { "node_name_high", 0, 0x1000286e, 0xffffffff}, + /* nodename from 0 bit to 31 bit */ + { "node_name_low", 0, 0xd4bbf12f, 0xffffffff}, + /* portname from 32 bit to 64 bit */ + { "port_name_high", 0, 0x2000286e, 0xffffffff}, + /* portname from 0 bit to 31 bit */ + { "port_name_low", 0, 0xd4bbf12f, 0xffffffff}, + /* port speed 0:auto 1:1Gbps 2:2Gbps 3:4Gbps 4:8Gbps 5:16Gbps */ + { "port_speed", 0, 0, 32}, + /* unit: us */ + { "interrupt_delay", 0, 0, 100}, + { "tape_support", 0, 0, 1}, /* tape support */ + { "End", 0, 0, 0} +}; + +struct unf_low_level_function_op_s hifc_fun_op = { + .low_level_type = UNF_HIFC_FC, + .name = "HIFC", + /* XID allocated from CM level */ + .xchg_mgr_type = UNF_LOW_LEVEL_MGR_TYPE_PASSTIVE, + .abts_xchg = UNF_NO_EXTRA_ABTS_XCHG, + .pass_through_flag = UNF_LOW_LEVEL_PASS_THROUGH_PORT_LOGIN, + .support_max_npiv_num = UNF_HIFC_MAXNPIV_NUM, + .chip_id = 0, + .support_max_speed = UNF_PORT_SPEED_32_G, + .support_max_rport = UNF_HIFC_MAXRPORT_NUM, + .sfp_type = UNF_PORT_TYPE_FC_SFP, + .rport_release_type = UNF_LOW_LEVEL_RELEASE_RPORT_ASYNC, + .sirt_page_mode = UNF_LOW_LEVEL_SIRT_PAGE_MODE_XCHG, + + /* Link service */ + .service_op = { + .pfn_unf_els_send = hifc_send_els_cmnd, + .pfn_unf_bls_send = hifc_send_bls_cmnd, + .pfn_unf_gs_send = hifc_send_gs_cmnd, + .pfn_unf_cmnd_send = hifc_send_scsi_cmnd, + .pfn_unf_release_rport_res = hifc_free_parent_resource, + .pfn_unf_flush_ini_resp_que = hifc_flush_ini_resp_queue, + .pfn_unf_alloc_rport_res = hifc_alloc_parent_resource, + .pfn_unf_rport_session_rst = hifc_rport_session_rst, + }, + + /* Port Mgr */ + .port_mgr_op = { + .pfn_ll_port_config_set = hifc_port_config_set, + .pfn_ll_port_config_get = hifc_port_config_get, + .pfn_ll_port_diagnose = hifc_port_diagnose, + } +}; + +struct hifc_port_config_op_s { + enum unf_port_config_set_op_e op_code; + unsigned int (*pfn_hifc_operation)(void *v_hba, void *v_para_in); +}; + +struct hifc_port_config_op_s hifc_config_set_op[] = { + { UNF_PORT_CFG_SET_SPEED, hifc_set_port_speed }, + { UNF_PORT_CFG_SET_TOPO, hifc_set_port_topo }, + { UNF_PORT_CFG_SET_BBSCN, hifc_set_port_bbscn }, + { UNF_PORT_CFG_SET_SFP_SWITCH, hifc_sfp_switch }, + { UNF_PORT_CFG_SET_PORT_SWITCH, hifc_sfp_switch }, + { UNF_PORT_CFG_SET_PORT_STATE, hifc_set_port_state }, + { UNF_PORT_CFG_UPDATE_WWN, NULL }, + { UNF_PORT_CFG_SET_FCP_CONF, hifc_set_port_fcp_conf }, + { 
UNF_PORT_CFG_SET_LOOP_ROLE, hifc_set_loop_role }, + { UNF_PORT_CFG_SET_MAX_SUPPORT_SPEED, hifc_set_max_support_speed }, + { UNF_PORT_CFG_UPDATE_FABRIC_PARAM, hifc_update_fabric_param }, + { UNF_PORT_CFG_UPDATE_PLOGI_PARAM, hifc_update_port_param }, + { UNF_PORT_CFG_UPDATE_FDISC_PARAM, NULL }, + { UNF_PORT_CFG_SAVE_HBA_INFO, hifc_save_hba_info }, + { UNF_PORT_CFG_SET_HBA_BASE_INFO, hifc_set_hba_base_info }, + { UNF_PORT_CFG_SET_FLASH_DATA_INFO, hifc_set_flash_data }, + { UNF_PORT_CFG_SET_BUTT, NULL } +}; + +struct hifc_port_cfg_get_op_s { + enum unf_port_config_get_op_e op_code; + unsigned int (*pfn_hifc_operation)(void *v_hba, void *param_out); +}; + +struct hifc_port_cfg_get_op_s hifc_config_get_op[] = { + { UNF_PORT_CFG_GET_SPEED_CFG, hifc_get_speed_cfg }, + { UNF_PORT_CFG_GET_SPEED_ACT, hifc_get_speed_act }, + { UNF_PORT_CFG_GET_TOPO_CFG, hifc_get_topo_cfg }, + { UNF_PORT_CFG_GET_TOPO_ACT, hifc_get_topo_act }, + { UNF_PORT_CFG_GET_LOOP_MAP, hifc_get_loop_map }, + { UNF_PORT_CFG_GET_SFP_PRESENT, NULL }, + { UNF_PORT_CFG_GET_SFP_INFO, hifc_get_sfp_info }, + { UNF_PORT_CFG_GET_FW_VER, hifc_get_firmware_version }, + { UNF_PORT_CFG_GET_HW_VER, hifc_get_hardware_version }, + { UNF_PORT_CFG_GET_WORKBALE_BBCREDIT, hifc_get_work_bale_bbcredit }, + { UNF_PORT_CFG_GET_WORKBALE_BBSCN, hifc_get_work_bale_bbscn }, + { UNF_PORT_CFG_GET_LOOP_ALPA, hifc_get_loop_alpa }, + { UNF_PORT_CFG_GET_MAC_ADDR, hifc_get_chip_msg }, + { UNF_PORT_CFG_CLR_LESB, hifc_clear_port_error_code }, + { UNF_PORT_CFG_GET_LESB_THEN_CLR, hifc_get_and_clear_port_error_code}, + { UNF_PORT_CFG_GET_PORT_INFO, hifc_get_port_current_info }, + { UNF_PORT_CFG_GET_LED_STATE, hifc_get_lport_led }, + { UNF_PORT_CFG_GET_FEC, hifc_get_port_fec }, + { UNF_PORT_CFG_GET_PCIE_LINK_STATE, hifc_get_hba_pcie_link_state }, + { UNF_PORT_CFG_GET_FLASH_DATA_INFO, hifc_get_flash_data }, + { UNF_PORT_CFG_GET_BUTT, NULL } +}; + +static unsigned int hifc_port_config_set(void *v_phba, + enum unf_port_config_set_op_e op_code, + void *v_var_in) +{ + unsigned int op_idx = 0; + + HIFC_CHECK(INVALID_VALUE32, v_phba, return UNF_RETURN_ERROR); + + for (op_idx = 0; + op_idx < sizeof(hifc_config_set_op) / + sizeof(struct hifc_port_config_op_s); + op_idx++) { + if (op_code == hifc_config_set_op[op_idx].op_code) { + if (!hifc_config_set_op[op_idx].pfn_hifc_operation) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Null operation for configuration, opcode(0x%x), operation ID(0x%x)", + op_code, op_idx); + return UNF_RETURN_ERROR; + } else { + return hifc_config_set_op[op_idx].pfn_hifc_operation(v_phba, v_var_in); + } + } + } + + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]No operation code for configuration, opcode(0x%x)", + op_code); + + return UNF_RETURN_ERROR; +} + +static unsigned int hifc_port_config_get(void *v_phba, + enum unf_port_config_get_op_e op_code, + void *v_para_out) +{ + unsigned int op_idx = 0; + + HIFC_CHECK(INVALID_VALUE32, v_phba, return UNF_RETURN_ERROR); + + for (op_idx = 0; + op_idx < sizeof(hifc_config_get_op) / + sizeof(struct hifc_port_cfg_get_op_s); + op_idx++) { + if (op_code == hifc_config_get_op[op_idx].op_code) { + if (!hifc_config_get_op[op_idx].pfn_hifc_operation) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Null operation to get configuration, opcode(0x%x), operation ID(0x%x)", + op_code, op_idx); + return UNF_RETURN_ERROR; + } else { + return hifc_config_get_op[op_idx].pfn_hifc_operation(v_phba, v_para_out); + } + } + } + + 
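/* No entry in hifc_config_get_op[] matched this opcode: warn and fail */ + 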
HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]No operation to get configuration, opcode(0x%x)", + op_code); + + return UNF_RETURN_ERROR; +} + +static unsigned int hifc_check_port_cfg( + const struct hifc_port_cfg_s *v_port_cfg) +{ + int topo_condition, speed_condition; + /* About Work Topology */ + topo_condition = ((v_port_cfg->port_topology != UNF_TOP_LOOP_MASK) && + (v_port_cfg->port_topology != UNF_TOP_P2P_MASK) && + (v_port_cfg->port_topology != UNF_TOP_AUTO_MASK)); + if (topo_condition) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Configured port topology(0x%x) is incorrect", + v_port_cfg->port_topology); + return UNF_RETURN_ERROR; + } + + /* About Work Mode */ + if (v_port_cfg->port_mode != UNF_PORT_MODE_INI) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Configured port mode(0x%x) is incorrect", + v_port_cfg->port_mode); + + return UNF_RETURN_ERROR; + } + + /* About Work Speed */ + speed_condition = ((v_port_cfg->port_speed != UNF_PORT_SPEED_AUTO) && + (v_port_cfg->port_speed != UNF_PORT_SPEED_2_G) && + (v_port_cfg->port_speed != UNF_PORT_SPEED_4_G) && + (v_port_cfg->port_speed != UNF_PORT_SPEED_8_G) && + (v_port_cfg->port_speed != UNF_PORT_SPEED_16_G) && + (v_port_cfg->port_speed != UNF_PORT_SPEED_32_G)); + if (speed_condition) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Configured port speed(0x%x) is incorrect", + v_port_cfg->port_speed); + return UNF_RETURN_ERROR; + } + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Check port configuration OK"); + + return RETURN_OK; +} + +static unsigned int hifc_get_port_cfg(struct hifc_hba_s *v_hba, + struct hifc_chip_info_s *v_chip_info, + unsigned char v_card_num) +{ +#define UNF_CONFIG_ITEM_LEN 15 + + /* + * Maximum length of a configuration item name, including the end + * character + */ +#define UNF_MAX_ITEM_NAME_LEN (32 + 1) + + /* Get and check parameters */ + char cfg_item[UNF_MAX_ITEM_NAME_LEN]; + unsigned int ret = UNF_RETURN_ERROR; + struct hifc_hba_s *hba = v_hba; + int iret = RETURN_ERROR_S32; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + memset((void *)cfg_item, 0, sizeof(cfg_item)); + + hba->card_info.func_num = + (hifc_global_func_id(v_hba->hw_dev_handle)) & UNF_FUN_ID_MASK; + hba->card_info.card_num = v_card_num; + + /* The range of PF of FC server is from PF1 to PF2 */ + iret = snprintf(cfg_item, UNF_CONFIG_ITEM_LEN, "hifc_cfg_%1u", + (hba->card_info.func_num)); + UNF_FUNCTION_RETURN_CHECK(iret, UNF_CONFIG_ITEM_LEN); + cfg_item[UNF_MAX_ITEM_NAME_LEN - 1] = 0; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Get port configuration: %s", cfg_item); + + /* Get configuration parameters from file */ + UNF_LOWLEVEL_GET_CFG_PARMS(ret, cfg_item, &hifc_port_cfg_parm[0], + (unsigned int *)(void *)&hba->port_cfg, + sizeof(hifc_port_cfg_parm) / + sizeof(struct unf_cfg_item_s)); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) can't get configuration", + hba->port_cfg.port_id); + + return ret; + } + + if (max_parent_qpc_num <= 2048) { + hba->port_cfg.sest_num = 2048; + hba->port_cfg.max_login = 2048; + } + + hba->port_cfg.port_id &= 0xff0000; + hba->port_cfg.port_id |= hba->card_info.card_num << 8; + hba->port_cfg.port_id |= hba->card_info.func_num; + hba->port_cfg.tape_support = (unsigned int)v_chip_info->tape_support; + + /* Parameters check */ + ret = hifc_check_port_cfg(&hba->port_cfg); + if (ret != 
RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) check configuration incorrect", + hba->port_cfg.port_id); + + return ret; + } + + /* Set configuration which is got from file */ + hba->port_speed_cfg = hba->port_cfg.port_speed; + hba->port_topo_cfg = hba->port_cfg.port_topology; + + return ret; +} + +void hifc_flush_root_ctx(struct hifc_hba_s *v_hba) +{ + int ret = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return); + + ret = hifc_func_rx_tx_flush(v_hba->hw_dev_handle); + if (ret) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]chipif_func_rx_tx_flush failed with return value(0x%x)", + ret); + } +} + +static unsigned int hifc_delete_srqc_via_cmdq_sync(struct hifc_hba_s *v_hba, + unsigned long long sqrc_gpa) +{ + /* Via CMND Queue */ +#define HIFC_DEL_SRQC_TIMEOUT 3000 + + int ret; + struct hifcoe_cmdqe_delete_srqc_s del_srqc_cmd; + struct hifc_cmd_buf *cmdq_in_buf; + + /* Alloc Cmnd buffer */ + cmdq_in_buf = hifc_alloc_cmd_buf(v_hba->hw_dev_handle); + if (!cmdq_in_buf) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]cmdq in_cmd_buf allocate failed"); + + HIFC_ERR_IO_STAT(v_hba, HIFCOE_TASK_T_DEL_SRQC); + return UNF_RETURN_ERROR; + } + + /* Build & Send Cmnd */ + memset(&del_srqc_cmd, 0, sizeof(del_srqc_cmd)); + del_srqc_cmd.wd0.task_type = HIFCOE_TASK_T_DEL_SRQC; + del_srqc_cmd.srqc_gpa_h = HIFC_HIGH_32_BITS(sqrc_gpa); + del_srqc_cmd.srqc_gpa_l = HIFC_LOW_32_BITS(sqrc_gpa); + hifc_cpu_to_big32(&del_srqc_cmd, sizeof(del_srqc_cmd)); + memcpy(cmdq_in_buf->buf, &del_srqc_cmd, sizeof(del_srqc_cmd)); + cmdq_in_buf->size = sizeof(del_srqc_cmd); + + ret = hifc_cmdq_detail_resp(v_hba->hw_dev_handle, HIFC_ACK_TYPE_CMDQ, + HIFC_MOD_FCOE, 0, + cmdq_in_buf, NULL, HIFC_DEL_SRQC_TIMEOUT); + + /* Free Cmnd Buffer */ + hifc_free_cmd_buf(v_hba->hw_dev_handle, cmdq_in_buf); + + if (ret) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Send del srqc via cmdq failed, ret=0x%x", ret); + + HIFC_ERR_IO_STAT(v_hba, HIFCOE_TASK_T_DEL_SRQC); + return UNF_RETURN_ERROR; + } + + HIFC_IO_STAT(v_hba, HIFCOE_TASK_T_DEL_SRQC); + + return RETURN_OK; +} + +void hifc_flush_srq_ctx(struct hifc_hba_s *v_hba) +{ + struct hifc_srq_info_s *srq_info = NULL; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Start destroy ELS SRQC"); + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return); + + /* Check state to avoid to flush SRQC again */ + srq_info = &v_hba->els_srq_info; + if (srq_info->srq_type == HIFC_SRQ_ELS && + srq_info->enable == UNF_TRUE) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[event]HBA(0x%x) flush ELS SRQC", + v_hba->port_index); + + (void)hifc_delete_srqc_via_cmdq_sync( + v_hba, + srq_info->cqm_srq_info->q_ctx_paddr); + } +} + +static unsigned int hifc_create_queues(struct hifc_hba_s *v_hba) +{ + unsigned int ret = UNF_RETURN_ERROR; + + ret = hifc_create_root_queues(v_hba); + if (ret != RETURN_OK) + goto out_creat_root_queue_fail; + + /* Initialize shared resources of SCQ and SRQ in parent queue */ + ret = hifc_create_common_share_queues(v_hba); + if (ret != RETURN_OK) + goto out_create_common_queue_fail; + + /* Initialize parent queue manager resources */ + ret = hifc_alloc_parent_queue_mgr(v_hba); + if (ret != RETURN_OK) + goto out_free_share_queue_resource; + + /* Initialize shared WQE page pool in parent SQ */ + ret = hifc_alloc_parent_sq_wqe_page_pool(v_hba); + if (ret != RETURN_OK) + goto out_free_parent_queue_resource; + + /* + 
* Notice: the configuration of SQ and QID(default_sq_id) + * must be the same in FC + */ + v_hba->next_clearing_sq = 0; + v_hba->default_sq_id = HIFC_QID_SQ; + + return RETURN_OK; + +out_free_parent_queue_resource: + hifc_free_parent_queue_mgr(v_hba); + +out_free_share_queue_resource: + hifc_flush_scq_ctx(v_hba); + hifc_flush_srq_ctx(v_hba); + hifc_destroy_common_share_queues(v_hba); + +out_create_common_queue_fail: + hifc_destroy_root_queues(v_hba); + +out_creat_root_queue_fail: + hifc_flush_root_ctx(v_hba); + + return ret; +} + +static void hifc_destroy_queues(struct hifc_hba_s *v_hba) +{ + /* Free parent queue resource */ + hifc_free_parent_queues(v_hba); + + /* Free queue manager resource */ + hifc_free_parent_queue_mgr(v_hba); + + /* Free linked List SQ and WQE page pool resource */ + hifc_free_parent_sq_wqe_page_pool(v_hba); + + /* Free shared SRQ and SCQ queue resource */ + hifc_destroy_common_share_queues(v_hba); + + /* Free root queue resource */ + hifc_destroy_root_queues(v_hba); +} + +static unsigned int hifc_notify_up_open_timer(struct hifc_hba_s *v_hba) +{ + int op_code = UNF_TRUE; + unsigned int cmd_scq_bit_map = 0; + unsigned int scq_index = 0; + unsigned int ret; + + for (scq_index = 0; scq_index < HIFC_TOTAL_SCQ_NUM; scq_index++) + cmd_scq_bit_map |= HIFC_SCQ_IS_CMD(scq_index) ? + (1 << scq_index) : (0 << scq_index); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) open timer, cmdscq bitmap:0x%x", + v_hba->port_cfg.port_id, cmd_scq_bit_map); + + ret = hifc_notify_up_config_timer(v_hba, op_code, cmd_scq_bit_map); + + return ret; +} + +static unsigned int hifc_notify_up_close_timer(struct hifc_hba_s *v_hba) +{ + int op_code = UNF_FALSE; + unsigned int cmd_scq_bit_map = 0; + unsigned int scq_index = 0; + unsigned int ret; + + for (scq_index = 0; scq_index < HIFC_TOTAL_SCQ_NUM; scq_index++) + cmd_scq_bit_map |= HIFC_SCQ_IS_CMD(scq_index) ? + (1 << scq_index) : (0 << scq_index); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) close timer with cmd_scq bitmap(0x%x)", + v_hba->port_cfg.port_id, cmd_scq_bit_map); + + ret = hifc_notify_up_config_timer(v_hba, op_code, cmd_scq_bit_map); + + return ret; +} + +static unsigned int hifc_initial_chip_access(struct hifc_hba_s *v_hba) +{ + int ret = RETURN_OK; + + /* 1. + * Initialize cqm access related with scq, emb cq, aeq(ucode-->driver) + */ + service_cqm_temp.service_handle = v_hba; + ret = cqm_service_register(v_hba->hw_dev_handle, &service_cqm_temp); + if (ret != CQM_SUCCESS) + return UNF_RETURN_ERROR; + + /* 2. 
Initialize mailbox(driver-->up), aeq(up--->driver) access */ + ret = hifc_register_mgmt_msg_cb(v_hba->hw_dev_handle, + HIFC_MOD_FC, v_hba, + hifc_up_msg_2_driver_proc); + if (ret != CQM_SUCCESS) + goto out_unreg_cqm; + + return RETURN_OK; + +out_unreg_cqm: + cqm_service_unregister(v_hba->hw_dev_handle); + + return UNF_RETURN_ERROR; +} + +static void hifc_release_chip_access(struct hifc_hba_s *v_hba) +{ + HIFC_CHECK(INVALID_VALUE32, v_hba->hw_dev_handle, return); + + hifc_unregister_mgmt_msg_cb(v_hba->hw_dev_handle, HIFC_MOD_FC); + + cqm_service_unregister(v_hba->hw_dev_handle); +} + +static void hifc_get_chip_info(struct hifc_hba_s *v_hba) +{ + unsigned int exi_base = 0; + unsigned int fun_index = 0; + + v_hba->vpid_start = v_hba->fc_service_cap.dev_fc_cap.vp_id_start; + v_hba->vpid_end = v_hba->fc_service_cap.dev_fc_cap.vp_id_end; + fun_index = hifc_global_func_id(v_hba->hw_dev_handle); + exi_base = 0; + + exi_base += (fun_index * HIFC_EXIT_STRIDE); + v_hba->exit_base = HIFC_LSW(exi_base); + v_hba->exit_count = HIFC_EXIT_STRIDE; + v_hba->image_count = UNF_HIFC_MAXRPORT_NUM; + v_hba->max_support_speed = max_speed; + v_hba->port_index = HIFC_LSB(fun_index); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) base information: PortIndex=0x%x, ImgCount=0x%x, ExiBase=0x%x, ExiCount=0x%x, VpIdStart=0x%x, VpIdEnd=0x%x, MaxSpeed=0x%x, Speed=0x%x, Topo=0x%x", + v_hba->port_cfg.port_id, v_hba->port_index, + v_hba->image_count, v_hba->exit_base, + v_hba->exit_count, v_hba->vpid_start, + v_hba->vpid_end, v_hba->max_support_speed, + v_hba->port_speed_cfg, v_hba->port_topo_cfg); +} + +static unsigned int hifc_init_host_res(struct hifc_hba_s *v_hba) +{ + unsigned int ret = RETURN_OK; + struct hifc_hba_s *hba = v_hba; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + + /* Initialize spin lock */ + spin_lock_init(&hba->hba_lock); + spin_lock_init(&hba->flush_state_lock); + spin_lock_init(&hba->delay_info.srq_lock); + /* Initialize init_completion */ + init_completion(&hba->hba_init_complete); + init_completion(&hba->mbox_complete); + + /* Step-1: initialize the communication channel between driver and uP */ + ret = hifc_initial_chip_access(hba); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]HIFC port(0x%x) can't initialize chip access", + hba->port_cfg.port_id); + + goto out_unmap_memory; + } + /* Step-2: get chip configuration information before creating + * queue resources + */ + hifc_get_chip_info(hba); + + /* Step-3: create queue resources */ + ret = hifc_create_queues(hba); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]HIFC port(0x%x) can't create queues", + hba->port_cfg.port_id); + + goto out_release_chip_access; + } + + /* Initialize status parameters */ + hba->active_port_speed = UNF_PORT_SPEED_UNKNOWN; + hba->active_topo = UNF_ACT_TOP_UNKNOWN; + hba->sfp_on = UNF_FALSE; + hba->port_loop_role = UNF_LOOP_ROLE_MASTER_OR_SLAVE; + hba->phy_link = UNF_PORT_LINK_DOWN; + hba->q_set_stage = HIFC_QUEUE_SET_STAGE_INIT; + + /* Initialize parameters referring to the lowlevel */ + hba->remote_rttov_tag = 0; + hba->port_bbscn_cfg = HIFC_LOWLEVEL_DEFAULT_BB_SCN; + + /* Initialize timer, and the unit of E_D_TOV is ms */ + hba->remote_edtov_tag = 0; + hba->remote_bbcredit = 0; + hba->compared_bbscn = 0; + hba->compared_edtov_val = UNF_DEFAULT_EDTOV; + hba->compared_ratov_val = UNF_DEFAULT_RATOV; + hba->removing = UNF_FALSE; + hba->dev_present = UNF_TRUE; + + /* 
Initialize parameters about cos */ + hba->cos_bit_map = cos_bit_map; + memset(hba->cos_rport_cnt, 0, HIFC_MAX_COS_NUM * sizeof(atomic_t)); + + /* Mailbox access completion */ + complete(&hba->mbox_complete); + + /* Notify uP to open timer after creating scq */ + ret = hifc_notify_up_open_timer(hba); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]HIFC port(0x%x) can't open timer", + hba->port_cfg.port_id); + + goto out_destroy_queues; + } + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]HIFC port(0x%x) initialize host resources succeeded", + hba->port_cfg.port_id); + + return ret; + +out_destroy_queues: + hifc_flush_scq_ctx(hba); + hifc_flush_srq_ctx(hba); + hifc_flush_root_ctx(hba); + hifc_destroy_queues(hba); + +out_release_chip_access: + hifc_release_chip_access(hba); + +out_unmap_memory: + return ret; +} + +static void hifc_update_lport_config( + struct hifc_hba_s *v_hba, + struct unf_low_level_function_op_s *v_low_level_fun) +{ +#define HIFC_MULTI_CONF_NONSUPPORT 0 + + struct unf_lport_cfg_item_s *lport_cfg_items = NULL; + + lport_cfg_items = &v_low_level_fun->lport_cfg_items; + + if (v_hba->port_cfg.max_login < v_low_level_fun->support_max_rport) + lport_cfg_items->max_login = v_hba->port_cfg.max_login; + else + lport_cfg_items->max_login = v_low_level_fun->support_max_rport; + + if ((v_hba->port_cfg.sest_num / 2) < UNF_RESERVE_SFS_XCHG) + lport_cfg_items->max_io = v_hba->port_cfg.sest_num; + else + lport_cfg_items->max_io = v_hba->port_cfg.sest_num - + UNF_RESERVE_SFS_XCHG; + + lport_cfg_items->max_sfs_xchg = UNF_MAX_SFS_XCHG; + lport_cfg_items->port_id = v_hba->port_cfg.port_id; + lport_cfg_items->port_mode = v_hba->port_cfg.port_mode; + lport_cfg_items->port_topology = v_hba->port_cfg.port_topology; + lport_cfg_items->max_queue_depth = v_hba->port_cfg.max_queue_depth; + + lport_cfg_items->port_speed = v_hba->port_cfg.port_speed; + lport_cfg_items->tape_support = v_hba->port_cfg.tape_support; + lport_cfg_items->res_mgmt_enabled = UNF_FALSE; + + v_low_level_fun->sys_port_name = + *(unsigned long long *)v_hba->sys_port_name; + v_low_level_fun->sys_node_name = + *(unsigned long long *)v_hba->sys_node_name; + + /* Update chip information */ + v_low_level_fun->dev = v_hba->pci_dev; + v_low_level_fun->chip_info.chip_work_mode = v_hba->work_mode; + v_low_level_fun->chip_info.chip_type = v_hba->chip_type; + v_low_level_fun->chip_info.disable_err_flag = 0; + v_low_level_fun->support_max_speed = v_hba->max_support_speed; + + v_low_level_fun->chip_id = 0; + + v_low_level_fun->sfp_type = UNF_PORT_TYPE_FC_SFP; + + v_low_level_fun->multi_conf_support = HIFC_MULTI_CONF_NONSUPPORT; + v_low_level_fun->support_max_xid_range = v_hba->port_cfg.sest_num; + v_low_level_fun->update_fw_reset_active = + UNF_PORT_UNGRADE_FW_RESET_INACTIVE; + v_low_level_fun->port_type = DRV_PORT_ENTITY_TYPE_PHYSICAL; + + if ((lport_cfg_items->port_id & UNF_FIRST_LPORT_ID_MASK) == + lport_cfg_items->port_id) { + v_low_level_fun->support_upgrade_report = + UNF_PORT_SUPPORT_UPGRADE_REPORT; + } else { + v_low_level_fun->support_upgrade_report = + UNF_PORT_UNSUPPORT_UPGRADE_REPORT; + } + + v_low_level_fun->low_level_type |= UNF_FC_PROTOCOL_TYPE; +} + +static unsigned int hifc_create_lport(struct hifc_hba_s *v_hba) +{ + void *lport = NULL; + struct unf_low_level_function_op_s low_level_fun; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + hifc_fun_op.dev = v_hba->pci_dev; + memcpy(&low_level_fun, &hifc_fun_op, + sizeof(struct 
unf_low_level_function_op_s)); + + /* Update port configuration table */ + hifc_update_lport_config(v_hba, &low_level_fun); + + /* Apply for lport resources */ + UNF_LOWLEVEL_ALLOC_LPORT(lport, v_hba, &low_level_fun); + if (!lport) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) can't allocate Lport", + v_hba->port_cfg.port_id); + + return UNF_RETURN_ERROR; + } + v_hba->lport = lport; + + return RETURN_OK; +} + +void hifc_release_probe_index(unsigned int probe_index) +{ + if (probe_index >= HIFC_MAX_PROBE_PORT_NUM) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Probe index(0x%x) is invalid", probe_index); + + return; + } + + spin_lock(&probe_spin_lock); + if (!test_bit((int)probe_index, (const unsigned long *)probe_bit_map)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Probe index(0x%x) is not probed", + probe_index); + + spin_unlock(&probe_spin_lock); + + return; + } + + clear_bit((int)probe_index, probe_bit_map); + spin_unlock(&probe_spin_lock); +} + +static void hifc_release_host_res(struct hifc_hba_s *v_hba) +{ + hifc_destroy_queues(v_hba); + hifc_release_chip_access(v_hba); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) release low level resource done", + v_hba->port_cfg.port_id); +} + +static struct hifc_hba_s *hifc_init_hba(struct pci_dev *v_dev, + void *v_hwdev_handle, + struct hifc_chip_info_s *v_chip_info, + unsigned char v_card_num) +{ + unsigned int ret = RETURN_OK; + struct hifc_hba_s *hba = NULL; + + /* Allocate HBA */ + hba = kmalloc(sizeof(*hba), GFP_ATOMIC); + HIFC_CHECK(INVALID_VALUE32, hba, return NULL); + memset(hba, 0, sizeof(struct hifc_hba_s)); + + /* Heartbeat default */ + hba->heart_status = 1; + + /* Private data in pciDev */ + hba->pci_dev = v_dev; /* PCI device */ + hba->hw_dev_handle = v_hwdev_handle; + + /* Work mode */ + hba->work_mode = v_chip_info->work_mode; + /* Create work queue */ + hba->work_queue = create_singlethread_workqueue("hifc"); + if (!hba->work_queue) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Hifc creat workqueue failed"); + + goto out_free_hba; + } + /* Init delay work */ + INIT_DELAYED_WORK(&hba->delay_info.del_work, + hifc_rcvd_els_from_srq_time_out); + + /* Notice: Only use FC features */ + (void)hifc_support_fc(v_hwdev_handle, &hba->fc_service_cap); + /* Check parent context available */ + if (hba->fc_service_cap.dev_fc_cap.max_parent_qpc_num == 0) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]FC parent context is not allocated in this function"); + + goto out_destroy_workqueue; + } + max_parent_qpc_num = hba->fc_service_cap.dev_fc_cap.max_parent_qpc_num; + + /* Get port configuration */ + ret = hifc_get_port_cfg(hba, v_chip_info, v_card_num); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Can't get port configuration"); + + goto out_destroy_workqueue; + } + /* Get WWN */ + *(unsigned long long *)hba->sys_node_name = v_chip_info->wwnn; + *(unsigned long long *)hba->sys_port_name = v_chip_info->wwpn; + + /* Initialize host resources */ + ret = hifc_init_host_res(hba); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]HIFC port(0x%x) can't initialize host resource", + hba->port_cfg.port_id); + + goto out_destroy_workqueue; + } + + /* Local Port create */ + ret = hifc_create_lport(hba); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, 
UNF_LOG_REG_ATT, UNF_ERR, + "[err]HIFC port(0x%x) can't create lport", + hba->port_cfg.port_id); + goto out_release_host_res; + } + complete(&hba->hba_init_complete); + + /* Print reference count */ + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "[info]Port(0x%x) probe succeeded.", + hba->port_cfg.port_id); + + return hba; + +out_release_host_res: + hifc_flush_scq_ctx(hba); + hifc_flush_srq_ctx(hba); + hifc_flush_root_ctx(hba); + hifc_release_host_res(hba); + +out_destroy_workqueue: + flush_workqueue(hba->work_queue); + destroy_workqueue(hba->work_queue); + hba->work_queue = NULL; + +out_free_hba: + kfree(hba); + + return NULL; +} + +void hifc_get_total_probed_num(unsigned int *v_probe_cnt) +{ + unsigned int i = 0; + unsigned int count = 0; + + spin_lock(&probe_spin_lock); + for (i = 0; i < HIFC_MAX_PROBE_PORT_NUM; i++) { + if (test_bit((int)i, (const unsigned long *)probe_bit_map)) + count++; + } + + *v_probe_cnt = count; + spin_unlock(&probe_spin_lock); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Probed port total number is 0x%x", count); +} + +static unsigned int hifc_assign_card_num(struct hifc_lld_dev *lld_dev, + struct hifc_chip_info_s *v_chip_info, + unsigned char *v_card_num) +{ + unsigned char i = 0; + unsigned long long card_index = 0; + + card_index = (!pci_is_root_bus(lld_dev->pdev->bus)) ? + lld_dev->pdev->bus->parent->number : + lld_dev->pdev->bus->number; + + spin_lock(&probe_spin_lock); + + for (i = 0; i < HIFC_MAX_CARD_NUM; i++) { + if (test_bit((int)i, (const unsigned long *)card_num_bit_map)) { + if ((card_num_manage[i].card_number == card_index) && + (card_num_manage[i].is_removing == UNF_FALSE)) { + card_num_manage[i].port_count++; + *v_card_num = i; + spin_unlock(&probe_spin_lock); + return RETURN_OK; + } + } + } + + for (i = 0; i < HIFC_MAX_CARD_NUM; i++) { + if (!test_bit((int)i, + (const unsigned long *)card_num_bit_map)) { + card_num_manage[i].card_number = card_index; + card_num_manage[i].port_count = 1; + card_num_manage[i].is_removing = UNF_FALSE; + *v_card_num = i; + set_bit(i, card_num_bit_map); + spin_unlock(&probe_spin_lock); + + return RETURN_OK; + } + } + + spin_unlock(&probe_spin_lock); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Have probe more than 0x%x port, probe failed", i); + + return UNF_RETURN_ERROR; +} + +static void hifc_dec_and_free_card_num(unsigned char v_card_num) +{ + /* 2 ports per card */ + if (v_card_num >= HIFC_MAX_CARD_NUM) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Card number(0x%x) is invalid", v_card_num); + + return; + } + + spin_lock(&probe_spin_lock); + + if (test_bit((int)v_card_num, + (const unsigned long *)card_num_bit_map)) { + card_num_manage[v_card_num].port_count--; + card_num_manage[v_card_num].is_removing = UNF_TRUE; + + if (card_num_manage[v_card_num].port_count == 0) { + card_num_manage[v_card_num].card_number = 0; + card_num_manage[v_card_num].is_removing = UNF_FALSE; + clear_bit((int)v_card_num, card_num_bit_map); + } + } else { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Can not find card number(0x%x)", v_card_num); + } + + spin_unlock(&probe_spin_lock); +} + +unsigned int hifc_assign_probe_index(unsigned int *v_probe_index) +{ + unsigned int i = 0; + + spin_lock(&probe_spin_lock); + for (i = 0; i < HIFC_MAX_PROBE_PORT_NUM; i++) { + if (!test_bit((int)i, (const unsigned long *)probe_bit_map)) { + *v_probe_index = i; + set_bit(i, probe_bit_map); + spin_unlock(&probe_spin_lock); + + 
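/* Probe index i is now reserved in probe_bit_map; report success */ + 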
return RETURN_OK; + } + } + spin_unlock(&probe_spin_lock); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Have probe more than 0x%x port, probe failed", i); + + return UNF_RETURN_ERROR; +} + +int hifc_probe(struct hifc_lld_dev *lld_dev, void **uld_dev, char *uld_dev_name) +{ + struct pci_dev *dev = NULL; + struct hifc_hba_s *hba = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int probe_index = 0; + unsigned int probe_total_num = 0; + unsigned char card_num = INVALID_VALUE8; + struct hifc_chip_info_s chip_info; + + HIFC_CHECK(INVALID_VALUE32, lld_dev, return UNF_RETURN_ERROR_S32); + HIFC_CHECK(INVALID_VALUE32, lld_dev->hwdev, + return UNF_RETURN_ERROR_S32); + HIFC_CHECK(INVALID_VALUE32, lld_dev->pdev, return UNF_RETURN_ERROR_S32); + HIFC_CHECK(INVALID_VALUE32, uld_dev, return UNF_RETURN_ERROR_S32); + HIFC_CHECK(INVALID_VALUE32, uld_dev_name, return UNF_RETURN_ERROR_S32); + + dev = lld_dev->pdev; /* pcie device */ + + memset(&chip_info, 0, sizeof(struct hifc_chip_info_s)); + /* 1. Get & check Total_Probed_number */ + hifc_get_total_probed_num(&probe_total_num); + if (probe_total_num >= HIFC_MAX_PORT_NUM) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Total probe num (0x%x) is larger than allowed number(64)", + probe_total_num); + + return UNF_RETURN_ERROR_S32; + } + /* 2. Check device work mode */ + if (hifc_support_fc(lld_dev->hwdev, NULL)) { + chip_info.work_mode = HIFC_SMARTIO_WORK_MODE_FC; + } else { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port work mode is not FC"); + return UNF_RETURN_ERROR_S32; + } + + /* 4. Assign & Get new Probe index */ + ret = hifc_assign_probe_index(&probe_index); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]AssignProbeIndex fail"); + + return UNF_RETURN_ERROR_S32; + } + + ret = hifc_get_chip_capability((void *)lld_dev->hwdev, &chip_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]GetChipCapability fail"); + return UNF_RETURN_ERROR_S32; + } + + /* Assign & Get new Card number */ + ret = hifc_assign_card_num(lld_dev, &chip_info, &card_num); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]hifc_assign_card_num fail"); + hifc_release_probe_index(probe_index); + + return UNF_RETURN_ERROR_S32; + } + + /* Init HBA resource */ + hba = hifc_init_hba(dev, lld_dev->hwdev, &chip_info, card_num); + if (!hba) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Probe HBA(0x%x) failed.", probe_index); + + hifc_release_probe_index(probe_index); + hifc_dec_and_free_card_num(card_num); + + return UNF_RETURN_ERROR_S32; + } + + /* Name by the order of probe */ + *uld_dev = hba; + + snprintf(uld_dev_name, HIFC_PORT_NAME_STR_LEN, "%s%02x%02x", + HIFC_PORT_NAME_LABEL, + hba->card_info.card_num, hba->card_info.func_num); + memcpy(hba->port_name, uld_dev_name, HIFC_PORT_NAME_STR_LEN); + + hba->probe_index = probe_index; + hifc_hba[probe_index] = hba; + + return RETURN_OK; +} + +static unsigned int hifc_port_check_fw_ready(struct hifc_hba_s *v_hba) +{ +#define HIFC_PORT_CLEAR_DONE 0 +#define HIFC_PORT_CLEAR_DOING 1 + unsigned int clear_state = HIFC_PORT_CLEAR_DOING; + unsigned int ret = RETURN_OK; + unsigned int wait_time_out = 0; + + do { + msleep(1000); + wait_time_out += 1000; + ret = hifc_mbx_get_fw_clear_stat(v_hba, &clear_state); + if (ret != RETURN_OK) + return UNF_RETURN_ERROR; + + /* Total time more than 30s, retry more 
than 3 times, failed */ + if ((wait_time_out > 30000) && + (clear_state != HIFC_PORT_CLEAR_DONE)) + return UNF_RETURN_ERROR; + + } while (clear_state != HIFC_PORT_CLEAR_DONE); + + return RETURN_OK; +} + +static unsigned int hifc_sfp_switch(void *v_hba, void *v_para_in) +{ + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba; + int turn_on = UNF_FALSE; + unsigned int ret = RETURN_OK; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_para_in, return UNF_RETURN_ERROR); + + /* Redundancy check */ + turn_on = *((int *)v_para_in); + if (turn_on == hba->sfp_on) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Port(0x%x) FC physical port is already %s", + hba->port_cfg.port_id, (turn_on) ? "on" : "off"); + + return ret; + } + if (turn_on == UNF_TRUE) { + ret = hifc_port_check_fw_ready(hba); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_WARN, + "[warn]Get port(0x%x) clear state failed, turn on fail", + hba->port_cfg.port_id); + return ret; + } + /* At first, configure port table info if necessary */ + ret = hifc_config_port_table(hba); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[err]Port(0x%x) can't configurate port table", + hba->port_cfg.port_id); + + return ret; + } + } + + /* Switch physical port */ + ret = hifc_port_switch(hba, turn_on); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Port(0x%x) switch failed", + hba->port_cfg.port_id); + return ret; + } + + /* Update HBA's sfp state */ + hba->sfp_on = turn_on; + + return ret; +} + +static unsigned int hifc_destroy_lport(struct hifc_hba_s *v_hba) +{ + unsigned int ret = UNF_RETURN_ERROR; + + UNF_LOWLEVEL_RELEASE_LOCAL_PORT(ret, v_hba->lport); + v_hba->lport = NULL; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) destroy L_Port done", + v_hba->port_cfg.port_id); + + return ret; +} + +unsigned int hifc_port_reset(struct hifc_hba_s *v_hba) +{ + unsigned int ret = RETURN_OK; + unsigned long time_out = 0; + int sfp_before_reset = UNF_FALSE; + int off_para_in = UNF_FALSE; + struct pci_dev *dev = NULL; + struct hifc_hba_s *hba = v_hba; + + HIFC_CHECK(INVALID_VALUE32, hba, return UNF_RETURN_ERROR); + dev = hba->pci_dev; + HIFC_CHECK(INVALID_VALUE32, dev, return UNF_RETURN_ERROR); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "[event]Port(0x%x) reset HBA begin", + hba->port_cfg.port_id); + + /* Wait for last init/reset completion */ + time_out = wait_for_completion_timeout( + &hba->hba_init_complete, + (unsigned long)HIFC_PORT_INIT_TIME_SEC_MAX * HZ); + + if (time_out == UNF_ZERO) { + UNF_TRACE(INVALID_VALUE32, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Last HBA initialize/reset timeout: %d second", + HIFC_PORT_INIT_TIME_SEC_MAX); + + return UNF_RETURN_ERROR; + } + + /* Save current port state */ + sfp_before_reset = hba->sfp_on; + + /* Inform the reset event to CM level before beginning */ + UNF_LOWLEVEL_PORT_EVENT(ret, hba->lport, UNF_PORT_RESET_START, NULL); + hba->reset_time = jiffies; + + /* Close SFP */ + ret = hifc_sfp_switch(hba, &off_para_in); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) can't close SFP", + hba->port_cfg.port_id); + hba->sfp_on = sfp_before_reset; + + complete(&hba->hba_init_complete); + + return ret; + } + + ret = hifc_port_check_fw_ready(hba); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, 
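[Editor's note: hifc_port_check_fw_ready above polls the firmware clear state once per second and gives up after 30 seconds. The same bounded-polling idiom, reduced to a sketch; demo_read_state is a stand-in for the mailbox query and is not a real driver function.]

#include <linux/delay.h>
#include <linux/errno.h>

#define DEMO_POLL_MS	1000
#define DEMO_LIMIT_MS	30000

static int demo_read_state(unsigned int *state);	/* hypothetical query */

static int demo_wait_fw_clear(void)
{
	unsigned int waited = 0, state = 1;	/* 1: clearing, 0: done */

	do {
		msleep(DEMO_POLL_MS);
		waited += DEMO_POLL_MS;

		if (demo_read_state(&state))
			return -EIO;		/* the query itself failed */
		if (waited > DEMO_LIMIT_MS && state != 0)
			return -ETIMEDOUT;	/* firmware never finished */
	} while (state != 0);

	return 0;
}
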
UNF_LOG_REG_ATT, UNF_ERR, + "[err]Get port(0x%x) clear state failed, hang port and report chip error", + hba->port_cfg.port_id); + + complete(&hba->hba_init_complete); + return ret; + } + + hifc_queue_pre_process(hba, UNF_FALSE); + + ret = hifc_mbox_reset_chip(hba, HIFC_MBOX_SUBTYPE_LIGHT_RESET); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]HIFC port(0x%x) can't reset chip mailbox", + hba->port_cfg.port_id); + + UNF_LOWLEVEL_PORT_EVENT(ret, hba->lport, + UNF_PORT_GET_FWLOG, NULL); + UNF_LOWLEVEL_PORT_EVENT(ret, hba->lport, + UNF_PORT_DEBUG_DUMP, NULL); + } + + /* Inform the success to CM level */ + UNF_LOWLEVEL_PORT_EVENT(ret, hba->lport, UNF_PORT_RESET_END, NULL); + + /* Queue open */ + hifc_enable_queues_dispatch(hba); + + /* Open SFP */ + (void)hifc_sfp_switch(hba, &sfp_before_reset); + + complete(&hba->hba_init_complete); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[event]Port(0x%x) reset HBA done", + hba->port_cfg.port_id); + + return ret; +#undef HIFC_WAIT_LINKDOWN_EVENT_MS +} + +static unsigned int hifc_delete_scqc_via_cmdq_sync(struct hifc_hba_s *v_hba, + unsigned int scqn) +{ + /* Via CMND Queue */ +#define HIFC_DEL_SCQC_TIMEOUT 3000 + + int ret; + struct hifcoe_cmdqe_delete_scqc_s del_scqc_cmd; + struct hifc_cmd_buf *cmdq_in_buf; + + /* Alloc cmd buffer */ + cmdq_in_buf = hifc_alloc_cmd_buf(v_hba->hw_dev_handle); + if (!cmdq_in_buf) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]cmdq in_cmd_buf alloc failed"); + HIFC_ERR_IO_STAT(v_hba, HIFCOE_TASK_T_DEL_SCQC); + return UNF_RETURN_ERROR; + } + + /* Build & Send Cmnd */ + memset(&del_scqc_cmd, 0, sizeof(del_scqc_cmd)); + del_scqc_cmd.wd0.task_type = HIFCOE_TASK_T_DEL_SCQC; + del_scqc_cmd.wd1.scqn = HIFC_LSW(scqn); + hifc_cpu_to_big32(&del_scqc_cmd, sizeof(del_scqc_cmd)); + memcpy(cmdq_in_buf->buf, &del_scqc_cmd, sizeof(del_scqc_cmd)); + cmdq_in_buf->size = sizeof(del_scqc_cmd); + + ret = hifc_cmdq_detail_resp(v_hba->hw_dev_handle, HIFC_ACK_TYPE_CMDQ, + HIFC_MOD_FCOE, 0, + cmdq_in_buf, NULL, HIFC_DEL_SCQC_TIMEOUT); + + /* Free cmnd buffer */ + hifc_free_cmd_buf(v_hba->hw_dev_handle, cmdq_in_buf); + + if (ret) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Send del scqc via cmdq failed, ret=0x%x", ret); + + HIFC_ERR_IO_STAT(v_hba, HIFCOE_TASK_T_DEL_SCQC); + return UNF_RETURN_ERROR; + } + + HIFC_IO_STAT(v_hba, HIFCOE_TASK_T_DEL_SCQC); + + return RETURN_OK; +} + +void hifc_flush_scq_ctx(struct hifc_hba_s *v_hba) +{ + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Start destroy total 0x%x SCQC", HIFC_TOTAL_SCQ_NUM); + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return); + + (void)hifc_delete_scqc_via_cmdq_sync(v_hba, 0); +} + +void hifc_set_hba_flush_state(struct hifc_hba_s *v_hba, int in_flush) +{ + unsigned long flag = 0; + + spin_lock_irqsave(&v_hba->flush_state_lock, flag); + v_hba->in_flushing = in_flush; + spin_unlock_irqrestore(&v_hba->flush_state_lock, flag); +} + +static int hifc_hba_is_present(struct hifc_hba_s *v_hba) +{ + int ret = RETURN_OK; + int present = UNF_FALSE; + unsigned int vendor_id = 0; + + ret = pci_read_config_dword(v_hba->pci_dev, 0, &vendor_id); + vendor_id &= HIFC_PCI_VENDOR_ID_MASK; + if ((ret == RETURN_OK) && (vendor_id == HIFC_PCI_VENDOR_ID)) { + present = UNF_TRUE; + } else { + present = UNF_FALSE; + v_hba->dev_present = UNF_FALSE; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "[info]Port %s remove: vender_id=0x%x, 
ret=0x%x", + present ? "normal" : "surprise", vendor_id, ret); + + return present; +} + +static void hifc_exit(struct pci_dev *v_dev, struct hifc_hba_s *v_hba) +{ + unsigned int ret = UNF_RETURN_ERROR; + int sfp_switch = UNF_FALSE; + int present = UNF_TRUE; + + v_hba->removing = UNF_TRUE; + + /* 1. Check HBA present or not */ + present = hifc_hba_is_present(v_hba); + if (present == UNF_TRUE) { + if (v_hba->phy_link == UNF_PORT_LINK_DOWN) + v_hba->q_set_stage = HIFC_QUEUE_SET_STAGE_FLUSHDONE; + + /* At first, close sfp */ + sfp_switch = UNF_FALSE; + (void)hifc_sfp_switch((void *)v_hba, (void *)&sfp_switch); + } + + /* 2. Report COM with HBA removing: delete route timer delay work */ + UNF_LOWLEVEL_PORT_EVENT(ret, v_hba->lport, UNF_PORT_BEGIN_REMOVE, NULL); + + /* 3. Report COM with HBA Nop, COM release I/O(s) & R_Port(s) forcely */ + UNF_LOWLEVEL_PORT_EVENT(ret, v_hba->lport, UNF_PORT_NOP, NULL); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]PCI device(%p) remove port(0x%x) failed", + v_dev, v_hba->port_index); + } + + if (present == UNF_TRUE) { + /* 4.1 Wait for all SQ empty, free SRQ buffer & SRQC */ + hifc_queue_pre_process(v_hba, UNF_TRUE); + } + + /* 5. Destroy L_Port */ + (void)hifc_destroy_lport(v_hba); + + /* 6. With HBA is present */ + if (present == UNF_TRUE) { + /* Enable Queues dispatch */ + hifc_enable_queues_dispatch(v_hba); + /* Need reset port if necessary */ + (void)hifc_mbox_reset_chip(v_hba, + HIFC_MBOX_SUBTYPE_HEAVY_RESET); + + /* Flush SCQ context */ + hifc_flush_scq_ctx(v_hba); + + /* Flush SRQ context */ + hifc_flush_srq_ctx(v_hba); + + /* Flush Root context in order to prevent DMA */ + hifc_flush_root_ctx(v_hba); + + /* + * NOTE: while flushing txrx, hash bucket will be cached out in + * UP. Wait to clear resources completely + */ + msleep(1000); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) flush scq & srq & root context done", + v_hba->port_cfg.port_id); + } + + /* 7. Notify uP to close timer before delete SCQ */ + ret = hifc_notify_up_close_timer(v_hba); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]HIFC port(0x%x) can't close timer", + v_hba->port_cfg.port_id); + } + + /* 8. Release host resources */ + hifc_release_host_res(v_hba); + + /* 9. Destroy FC work queue */ + if (v_hba->work_queue) { + flush_workqueue(v_hba->work_queue); + destroy_workqueue(v_hba->work_queue); + v_hba->work_queue = NULL; + } + + /* 10. Release Probe index & Decrease card number */ + hifc_release_probe_index(v_hba->probe_index); + hifc_dec_and_free_card_num((unsigned char)v_hba->card_info.card_num); + + /* 11. 
Free HBA memory */ + kfree(v_hba); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[event]PCI device(%p) remove succeed", v_dev); +} + +void hifc_remove(struct hifc_lld_dev *lld_dev, void *uld_dev) +{ + struct pci_dev *dev = NULL; + struct hifc_hba_s *hba = (struct hifc_hba_s *)uld_dev; + unsigned int probe_total_num = 0; + unsigned int probe_index = 0; + + HIFC_CHECK(INVALID_VALUE32, NULL != lld_dev, return); + HIFC_CHECK(INVALID_VALUE32, NULL != uld_dev, return); + HIFC_CHECK(INVALID_VALUE32, NULL != lld_dev->hwdev, return); + HIFC_CHECK(INVALID_VALUE32, NULL != lld_dev->pdev, return); + + dev = hba->pci_dev; + + /* Get total probed port number */ + hifc_get_total_probed_num(&probe_total_num); + if (probe_total_num < 1) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Port manager is empty and no need to remove"); + return; + } + + /* check pci vendor id */ + if (dev->vendor != HIFC_PCI_VENDOR_ID_HUAWEI) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Wrong vendor id(0x%x) and exit", dev->vendor); + return; + } + + /* Check function ability */ + + if (!(hifc_support_fc(lld_dev->hwdev, NULL))) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]FC is not enable in this function"); + return; + } + + /* Get probe index */ + probe_index = hba->probe_index; + + /* Parent context allocation check */ + if (hba->fc_service_cap.dev_fc_cap.max_parent_qpc_num == 0) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]FC parent context not allocate in this function"); + return; + } + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]HBA(0x%x) start removing...", hba->port_index); + + /* HBA removinig... */ + hifc_exit(dev, hba); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "[event]Port(0x%x) pci device removed, vendorid(0x%04x) devid(0x%04x)", + probe_index, dev->vendor, dev->device); + + /* Probe index check */ + if (probe_index < HIFC_HBA_PORT_MAX_NUM) { + hifc_hba[probe_index] = NULL; + } else { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Probe index(0x%x) is invalid and remove failed", + probe_index); + } + + hifc_get_total_probed_num(&probe_total_num); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[event]Removed index=%u, RemainNum=%u", + probe_index, probe_total_num); +} + +void hifc_event(struct hifc_lld_dev *lld_dev, void *uld_dev, + struct hifc_event_info *event) +{ + struct hifc_hba_s *hba = uld_dev; + + HIFC_CHECK(INVALID_VALUE32, NULL != lld_dev, return); + HIFC_CHECK(INVALID_VALUE32, NULL != lld_dev->hwdev, return); + HIFC_CHECK(INVALID_VALUE32, NULL != lld_dev->pdev, return); + HIFC_CHECK(INVALID_VALUE32, NULL != hba, return); + HIFC_CHECK(INVALID_VALUE32, NULL != event, return); + + switch (event->type) { + case HIFC_EVENT_HEART_LOST: + hba->heart_status = 0; + HIFC_COM_UP_ERR_EVENT_STAT(hba, HIFC_EVENT_HEART_LOST); + break; + default: + break; + } +} + +static unsigned int hifc_get_hba_pcie_link_state(void *v_hba, + void *v_link_state) +{ + int *link_state = v_link_state; + int present = UNF_TRUE; + struct hifc_hba_s *hba = v_hba; + int ret; + int last_dev_state = UNF_TRUE; + int cur_dev_state = UNF_TRUE; + + HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR); + HIFC_CHECK(INVALID_VALUE32, v_link_state, return UNF_RETURN_ERROR); + last_dev_state = hba->dev_present; + ret = hifc_get_card_present_state(hba->hw_dev_handle, (bool *)&present); + if (ret || present != 
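[Editor's note: both hifc_hba_is_present and hifc_remove above decide between a normal and a surprise (hot) removal by reading identifiers out of PCI config space; once the device is gone, config reads return all-ones instead of the Huawei vendor ID. A minimal sketch of that check, assuming only the HIFC_PCI_VENDOR_ID_HUAWEI value (0x19e5) defined in hifc_hba.h; demo_device_present is illustrative.]

#include <linux/pci.h>

#define DEMO_HUAWEI_VENDOR_ID 0x19e5

static bool demo_device_present(struct pci_dev *pdev)
{
	u32 val = 0;

	/* on a surprise-removed device this read yields 0xffffffff */
	if (pci_read_config_dword(pdev, PCI_VENDOR_ID, &val))
		return false;

	return (val & 0xffff) == DEMO_HUAWEI_VENDOR_ID;
}
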
UNF_TRUE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "[event]port(0x%x) is not present,ret:%d, present:%d", + hba->port_cfg.port_id, ret, present); + cur_dev_state = UNF_FALSE; + } else { + cur_dev_state = UNF_TRUE; + } + + hba->dev_present = cur_dev_state; + + /* the heartbeat is considered lost only when the PCIE link is down for + * two times. + */ + if ((last_dev_state == UNF_FALSE) && (cur_dev_state == UNF_FALSE)) + hba->heart_status = UNF_FALSE; + *link_state = hba->dev_present; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_ABNORMAL, UNF_INFO, + "Port:0x%x,get dev present:%d", hba->port_cfg.port_id, + *link_state); + return RETURN_OK; +} diff --git a/drivers/scsi/huawei/hifc/hifc_hba.h b/drivers/scsi/huawei/hifc/hifc_hba.h new file mode 100644 index 000000000000..0ffa7c3402b0 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_hba.h @@ -0,0 +1,234 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef __HIFC_HBA_H__ +#define __HIFC_HBA_H__ + +#include "unf_common.h" +#include "hifc_queue.h" +#include "hifc_api_cmd.h" +#include "hifc_mgmt.h" + +#define HIFC_PCI_VENDOR_ID_MASK (0xffff) + +#define HIFC_LOWLEVEL_DEFAULT_LOOP_BB_CREDIT 8 +#define HIFC_LOWLEVEL_DEFAULT_32G_BB_CREDIT 255 +#define HIFC_LOWLEVEL_DEFAULT_16G_BB_CREDIT 255 +#define HIFC_LOWLEVEL_DEFAULT_842G_BB_CREDIT 255 +#define HIFC_LOWLEVEL_DEFAULT_BB_SCN 0 + +#define HIFC_LOWLEVEL_DEFAULT_32G_ESCH_VALUE 28081 +#define HIFC_LOWLEVEL_DEFAULT_16G_ESCH_VALUE 14100 +#define HIFC_LOWLEVEL_DEFAULT_842G_ESCH_VALUE 7000 +#define HIFC_LOWLEVEL_DEFAULT_ESCH_BUS_SIZE 0x2000 + +#define HIFC_SMARTIO_WORK_MODE_FC 0x1 +#define UNF_FUN_ID_MASK 0x07 +#define UNF_HIFC_FC 0x01 +#define UNF_HIFC_MAXNPIV_NUM 64 +#define HIFC_MAX_COS_NUM 8 +#define HIFC_PCI_VENDOR_ID_HUAWEI 0x19e5 +#define HIFC_SCQ_CNTX_SIZE 32 +#define HIFC_SRQ_CNTX_SIZE 64 +#define HIFC_PORT_INIT_TIME_SEC_MAX 1 + +#define HIFC_PORT_NAME_LABEL "hifc" +#define HIFC_PORT_NAME_STR_LEN 16 +#define HIFC_MAX_PROBE_PORT_NUM 64 +#define HIFC_PORT_NUM_PER_TABLE 64 +#define HIFC_MAX_CARD_NUM 32 +#define HIFC_HBA_PORT_MAX_NUM HIFC_MAX_PROBE_PORT_NUM +/* Heart Lost Flag */ +#define HIFC_EVENT_HEART_LOST 0 + +#define HIFC_GET_HBA_PORT_ID(__hba) ((__hba)->port_index) +#define HIFC_HBA_NOT_PRESENT(__hba) ((__hba)->dev_present == UNF_FALSE) + +struct hifc_port_cfg_s { + unsigned int port_id; /* Port ID */ + unsigned int port_mode; /* Port mode:INI(0x20) TGT(0x10) BOTH(0x30) */ + unsigned int port_topology; /* Port topo:0x3:loop,0xc:p2p,0xf:auto */ + unsigned int port_alpa; /* Port ALPA */ + unsigned int max_queue_depth;/* Max Queue depth Registration to SCSI */ + unsigned int sest_num; /* IO burst num:512-4096 */ + unsigned int max_login; /* Max Login Session. 
*/ + unsigned int node_name_hi; /* nodename high 32 bits */ + unsigned int node_name_lo; /* nodename low 32 bits */ + unsigned int port_name_hi; /* portname high 32 bits */ + unsigned int port_name_lo; /* portname low 32 bits */ + /* Port speed 0:auto 4:4Gbps 8:8Gbps 16:16Gbps */ + unsigned int port_speed; + unsigned int interrupt_delay; /* Delay times(ms) in interrupt */ + unsigned int tape_support; /* tape support */ +}; + +#define HIFC_VER_INFO_SIZE 128 +struct hifc_drv_version_s { + char ver[HIFC_VER_INFO_SIZE]; +}; + +struct hifc_card_info_s { + unsigned int card_num : 8; + unsigned int func_num : 8; + unsigned int base_func : 8; + /* + * Card type:UNF_FC_SERVER_BOARD_32_G(6) 32G mode, + * UNF_FC_SERVER_BOARD_16_G(7)16G mode + */ + unsigned int card_type : 8; +}; + +struct hifc_card_num_manage_s { + int is_removing; + unsigned int port_count; + unsigned long long card_number; +}; + +struct hifc_led_state_s { + unsigned char green_speed_led; + unsigned char yellow_speed_led; + unsigned char ac_led; + unsigned char reserved; +}; + +enum hifc_queue_set_stage_e { + HIFC_QUEUE_SET_STAGE_INIT = 0, + HIFC_QUEUE_SET_STAGE_SCANNING, + HIFC_QUEUE_SET_STAGE_FLUSHING, + HIFC_QUEUE_SET_STAGE_FLUSHDONE, + HIFC_QUEUE_SET_STAGE_BUTT +}; + +struct hifc_srq_delay_info_s { + unsigned char srq_delay_flag; /* Check whether need to delay */ + unsigned char root_rq_rcvd_flag; + unsigned short rsd; + spinlock_t srq_lock; + struct unf_frame_pkg_s pkg; + struct delayed_work del_work; +}; + +struct hifc_fw_ver_detail_s { + unsigned char ucode_ver[HIFC_VER_LEN]; + unsigned char ucode_compile_time[HIFC_COMPILE_TIME_LEN]; + unsigned char up_ver[HIFC_VER_LEN]; + unsigned char up_compile_time[HIFC_COMPILE_TIME_LEN]; + unsigned char boot_ver[HIFC_VER_LEN]; + unsigned char boot_compile_time[HIFC_COMPILE_TIME_LEN]; +}; + +/* get wwpn and wwnn */ +struct hifc_chip_info_s { + unsigned char work_mode; + unsigned char tape_support; + unsigned long long wwpn; + unsigned long long wwnn; +}; + +struct hifc_hba_s { + struct pci_dev *pci_dev; + void *hw_dev_handle; + struct fc_service_cap fc_service_cap; + struct hifc_scq_info_s scq_info[HIFC_TOTAL_SCQ_NUM]; + struct hifc_srq_info_s els_srq_info; + /* PCI IO Memory */ + void __iomem *bar0; + unsigned int bar0_len; + + struct hifc_root_info_s root_info; + struct hifc_parent_queue_mgr_s *parent_queue_mgr; + + /* Link list Sq WqePage Pool */ + struct hifc_sq_wqe_page_pool_s sq_wpg_pool; + + enum hifc_queue_set_stage_e q_set_stage; + unsigned int next_clearing_sq; + unsigned int default_sq_id; + /* Port parameters, Obtained through firmware */ + unsigned short q_s_max_count; + unsigned char port_type; /* FC Port */ + unsigned char port_index; /* Phy Port */ + unsigned int default_scqn; + + unsigned char chip_type; /* chiptype:Smart or fc */ + unsigned char work_mode; + struct hifc_card_info_s card_info; + char port_name[HIFC_PORT_NAME_STR_LEN]; + unsigned int probe_index; + + unsigned short exit_base; + unsigned short exit_count; + unsigned short image_count; + unsigned char vpid_start; + unsigned char vpid_end; + + spinlock_t flush_state_lock; + int in_flushing; + + struct hifc_port_cfg_s port_cfg; /* Obtained through Config */ + + void *lport; /* Used in UNF level */ + + unsigned char sys_node_name[UNF_WWN_LEN]; + unsigned char sys_port_name[UNF_WWN_LEN]; + + struct completion hba_init_complete; + struct completion mbox_complete; + + unsigned short removing; + int sfp_on; + int dev_present; + int heart_status; + spinlock_t hba_lock; + unsigned int port_topo_cfg; + unsigned int 
port_bbscn_cfg; + unsigned int port_loop_role; + unsigned int port_speed_cfg; + unsigned int max_support_speed; + + unsigned char remote_rttov_tag; + unsigned char remote_edtov_tag; + unsigned short compared_bbscn; + unsigned short remote_bbcredit; + unsigned int compared_edtov_val; + unsigned int compared_ratov_val; + enum unf_act_topo_e active_topo; + unsigned int active_port_speed; + unsigned int active_rx_bb_credit; + unsigned int active_bb_scn; + unsigned int phy_link; + unsigned int fcp_conf_cfg; + /* loop */ + unsigned char active_al_pa; + unsigned char loop_map_valid; + unsigned char loop_map[UNF_LOOPMAP_COUNT]; + + unsigned int cos_bit_map; + atomic_t cos_rport_cnt[HIFC_MAX_COS_NUM]; + struct hifc_led_state_s led_states; + unsigned int fec_status; + struct workqueue_struct *work_queue; + unsigned long long reset_time; + struct hifc_srq_delay_info_s delay_info; +}; + +enum drv_port_entity_type_e { + DRV_PORT_ENTITY_TYPE_PHYSICAL = 0, + DRV_PORT_ENTITY_TYPE_VIRTUAL = 1, + DRV_PORT_ENTITY_TYPE_BUTT +}; + +extern struct hifc_hba_s *hifc_hba[HIFC_HBA_PORT_MAX_NUM]; +extern spinlock_t probe_spin_lock; +extern unsigned long probe_bit_map[HIFC_MAX_PROBE_PORT_NUM / + HIFC_PORT_NUM_PER_TABLE]; + +unsigned int hifc_port_reset(struct hifc_hba_s *v_hba); +void hifc_flush_scq_ctx(struct hifc_hba_s *v_hba); +void hifc_set_hba_flush_state(struct hifc_hba_s *v_hba, int in_flush); +void hifc_get_total_probed_num(unsigned int *v_probe_cnt); + +#endif diff --git a/drivers/scsi/huawei/hifc/hifc_queue.c b/drivers/scsi/huawei/hifc/hifc_queue.c new file mode 100644 index 000000000000..2c932d26bf90 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_queue.c @@ -0,0 +1,7020 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#include "unf_log.h" +#include "unf_common.h" +#include "hifc_queue.h" +#include "hifc_module.h" +#include "hifc_wqe.h" +#include "hifc_service.h" +#include "hifc_chipitf.h" +#include "hifc_cqm_object.h" +#include "hifc_cqm_main.h" + +#define HIFC_UCODE_CMD_MODIFY_QUEUE_CONTEXT 0 + +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) +#define HIFC_DONE_MASK (0x00000001) +#else +#define HIFC_DONE_MASK (0x01000000) +#endif +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) +#define HIFC_OWNER_MASK (0x80000000) +#else +#define HIFC_OWNER_MASK (0x00000080) +#endif +#define HIFC_SQ_LINK_PRE (1 << 2) + +#define HIFC_SQ_HEADER_ADDR_ALIGN_SIZE (64) +#define HIFC_SQ_HEADER_ADDR_ALIGN_SIZE_MASK (HIFC_SQ_HEADER_ADDR_ALIGN_SIZE - 1) + +#define HIFC_ADDR_64_ALIGN(addr)\ + (((addr) + (HIFC_SQ_HEADER_ADDR_ALIGN_SIZE_MASK)) &\ + ~(HIFC_SQ_HEADER_ADDR_ALIGN_SIZE_MASK)) + +static unsigned int hifc_get_parity_value(unsigned long long *v_src_data, + unsigned int v_row, + unsigned int v_column) +{ + unsigned int i = 0; + unsigned int j = 0; + unsigned int offset = 0; + unsigned int group = 0; + unsigned int bit_offset = 0; + unsigned int bit_val = 0; + unsigned int tmp_val = 0; + unsigned int dest_data = 0; + + for (i = 0; i < v_row; i++) { + for (j = 0; j < v_column; j++) { + offset = (v_row * j + i); + group = offset / (sizeof(v_src_data[0]) * 8); + bit_offset = offset % (sizeof(v_src_data[0]) * 8); + tmp_val = (v_src_data[group] >> bit_offset) & 0x1; + + if (j == 0) { + bit_val = tmp_val; + continue; + } + + bit_val ^= tmp_val; + } + + bit_val = (~bit_val) & 0x1; + + dest_data |= (bit_val << i); + } + + return dest_data; +} + +/** + * hifc_update_producer_info - update producer pi and obit value + * @q_depth: queue max depth + * @v_pi: pi 
vaue after updated queue + * @v_owner: owner vaue after updated queue + */ +static void hifc_update_producer_info(unsigned short q_depth, + unsigned short *v_pi, + unsigned short *v_owner) +{ + unsigned short cur_pi = 0; + unsigned short next_pi = 0; + unsigned short owner = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_pi, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_owner, return); + + cur_pi = *v_pi; + next_pi = cur_pi + 1; + + if (next_pi < q_depth) { + *v_pi = next_pi; + } else { + /* PI reversal */ + *v_pi = 0; + + /* obit reversal */ + owner = *v_owner; + *v_owner = !owner; + } +} + +/** + * hifc_update_consumer_info - update consumer ci and obit value + * @q_depth: queue max deppth + * @v_ci: ci vaue after updated queue + * @v_owner: owner vaue after updated queue + */ +static void hifc_update_consumer_info(unsigned short q_depth, + unsigned short *v_ci, + unsigned short *v_owner) +{ + unsigned short cur_ci = 0; + unsigned short next_ci = 0; + unsigned short owner = 0; + + cur_ci = *v_ci; + next_ci = cur_ci + 1; + + if (next_ci < q_depth) { + *v_ci = next_ci; + } else { + /* CI reversal */ + *v_ci = 0; + + /* obit reversal */ + owner = *v_owner; + *v_owner = !owner; + } +} + +static inline void hifc_update_cq_header(struct hifc_ci_record_s *v_ci_record, + unsigned short ci, + unsigned short owner) +{ + unsigned int size = 0; + struct hifc_ci_record_s ci_record = { 0 }; + + size = sizeof(struct hifc_ci_record_s); + memcpy(&ci_record, v_ci_record, size); + hifc_big_to_cpu64(&ci_record, size); + + ci_record.cmsn = ci + + (unsigned short)(owner << HIFC_CQ_HEADER_OWNER_SHIFT); + ci_record.dump_cmsn = ci_record.cmsn; + hifc_cpu_to_big64(&ci_record, size); + + wmb(); + memcpy(v_ci_record, &ci_record, size); +} + +static void hifc_update_srq_header(struct hifc_db_record *v_pmsn_record, + unsigned short pmsn) +{ + unsigned int size = 0; + struct hifc_db_record pmsn_record = { 0 }; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_pmsn_record, return); + + size = sizeof(struct hifc_db_record); + memcpy(&pmsn_record, v_pmsn_record, size); + hifc_big_to_cpu64(&pmsn_record, size); + + pmsn_record.pmsn = pmsn; + pmsn_record.dump_pmsn = pmsn_record.pmsn; + hifc_cpu_to_big64(&pmsn_record, sizeof(struct hifc_db_record)); + + wmb(); + memcpy(v_pmsn_record, &pmsn_record, size); +} + +static unsigned int hifc_alloc_root_sq_info( + struct hifc_root_info_s *v_root_info) +{ + unsigned int sq_info_size = 0; + struct hifc_root_sq_info_s *root_sq_info = NULL; + + sq_info_size = (unsigned int) + (sizeof(struct hifc_root_sq_info_s) * v_root_info->sq_num); + root_sq_info = (struct hifc_root_sq_info_s *)kmalloc(sq_info_size, + GFP_ATOMIC); + if (!root_sq_info) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Allocate Root SQ(s) failed"); + + return UNF_RETURN_ERROR; + } + + memset(root_sq_info, 0, sq_info_size); + v_root_info->sq_info = root_sq_info; + + return RETURN_OK; +} + +static void hifc_free_root_sq_info(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index; + struct hifc_root_sq_info_s *sq_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_root_info, return); + + for (q_index = 0; q_index < v_root_info->sq_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(v_root_info->sq_info) + + q_index; + UNF_REFERNCE_VAR(sq_info); + } + kfree(v_root_info->sq_info); + v_root_info->sq_info = NULL; +} + +static void hifc_init_root_sq_base_info(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + unsigned short 
global_base_qpn = 0; + unsigned short max_sq_num = 0; + struct hifc_root_sq_info_s *sq_info = NULL; + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_root_info->phba; + global_base_qpn = hifc_get_global_base_qpn(hba->hw_dev_handle); + max_sq_num = hifc_func_max_qnum(hba->hw_dev_handle); + + for (q_index = 0; q_index < v_root_info->sq_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(v_root_info->sq_info) + + q_index; + sq_info->qid = (unsigned short)q_index; + sq_info->max_qnum = max_sq_num; + spin_lock_init(&sq_info->root_sq_spin_lock); + sq_info->q_depth = HIFC_ROOT_SQ_DEPTH; + sq_info->wqe_bb_size = HIFC_ROOT_SQ_WQEBB; + sq_info->root_info = v_root_info; + sq_info->global_qpn = global_base_qpn + q_index; + sq_info->owner = HIFC_ROOT_SQ_LOOP_OWNER; + sq_info->in_flush = UNF_FALSE; + } +} + +static unsigned int hifc_alloc_root_sq_ci_addr( + struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + unsigned int ci_addr_size = 0; + unsigned int ci_addr_offset = 0; + struct hifc_hba_s *hba = NULL; + struct hifc_root_sq_info_s *sq_info = NULL; + + /* Alignment with 4 Bytes */ + ci_addr_size = HIFC_ROOT_SQ_CI_TABLE_STEP_BYTE * v_root_info->sq_num; + hba = (struct hifc_hba_s *)v_root_info->phba; + + v_root_info->virt_sq_ci_table_buff = dma_alloc_coherent( + &hba->pci_dev->dev, + ci_addr_size, + &v_root_info->sq_ci_table_dma, + GFP_KERNEL); + if (!v_root_info->virt_sq_ci_table_buff) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Allocate Root SQ CI table failed"); + + return UNF_RETURN_ERROR; + } + memset(v_root_info->virt_sq_ci_table_buff, 0, ci_addr_size); + v_root_info->sq_ci_table_size = ci_addr_size; + + for (q_index = 0; q_index < v_root_info->sq_num; q_index++) { + ci_addr_offset = q_index * HIFC_ROOT_SQ_CI_TABLE_STEP_BYTE; + sq_info = (struct hifc_root_sq_info_s *)(v_root_info->sq_info) + + q_index; + sq_info->ci_addr = (unsigned short *) + ((void *) + (((unsigned char *)v_root_info->virt_sq_ci_table_buff) + + ci_addr_offset)); + sq_info->ci_dma_addr = v_root_info->sq_ci_table_dma + + ci_addr_offset; + } + + return RETURN_OK; +} + +static void hifc_free_root_sq_ci_addr(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_hba_s *hba = NULL; + struct hifc_root_sq_info_s *sq_info = NULL; + + root_info = v_root_info; + hba = (struct hifc_hba_s *)root_info->phba; + dma_free_coherent(&hba->pci_dev->dev, root_info->sq_ci_table_size, + root_info->virt_sq_ci_table_buff, + root_info->sq_ci_table_dma); + root_info->virt_sq_ci_table_buff = NULL; + root_info->sq_ci_table_dma = 0; + + for (q_index = 0; q_index < root_info->sq_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + sq_info->ci_addr = NULL; + sq_info->ci_dma_addr = 0; + } +} + +static unsigned int hifc_alloc_root_sq_buff( + struct hifc_root_info_s *v_root_info) +{ + int ret = 0; + unsigned int q_index = 0; + unsigned int back_q_num = 0; + struct hifc_root_sq_info_s *sq_info = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_hba_s *hba = NULL; + + root_info = v_root_info; + hba = (struct hifc_hba_s *)(root_info->phba); + + for (q_index = 0; q_index < root_info->sq_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + + /* Wqe_Base_Size:64; Depth:2048; Page_Size:4096 */ + ret = hifc_slq_alloc(hba->hw_dev_handle, sq_info->wqe_bb_size, + sq_info->q_depth, (u16)PAGE_SIZE, + (u64 *)&sq_info->cla_addr, + 
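[Editor's note: the producer/consumer helpers earlier in this file (hifc_update_producer_info / hifc_update_consumer_info) advance a ring index and flip an "owner" (O-bit) flag every time the index wraps, so hardware and software can distinguish fresh entries from stale ones. Reduced to a sketch; struct demo_ring is hypothetical.]

struct demo_ring {
	unsigned short pi;	/* producer (or consumer) index */
	unsigned short owner;	/* O-bit, toggles on every wrap */
	unsigned short depth;	/* queue depth */
};

/* advance the index by one; on wrap, reset to 0 and invert the owner bit */
static void demo_ring_advance(struct demo_ring *r)
{
	if (r->pi + 1 < r->depth) {
		r->pi++;
	} else {
		r->pi = 0;
		r->owner = !r->owner;
	}
}
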
&sq_info->sq_handle); + if ((ret != 0) || (!sq_info->sq_handle) || + (sq_info->cla_addr == 0)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[err]Port(0x%x) slq_allocate Root SQ WQE buffer failed, SQ index = %u, return %u", + hba->port_cfg.port_id, q_index, ret); + + goto free_sq_wqe_buff; + } + } + + return RETURN_OK; + +free_sq_wqe_buff: + back_q_num = q_index; + + for (q_index = 0; q_index < back_q_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + hifc_slq_free(hba->hw_dev_handle, sq_info->sq_handle); + sq_info->sq_handle = NULL; + sq_info->cla_addr = 0; + } + + return UNF_RETURN_ERROR; +} + +static void hifc_free_root_sq_buff(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + struct hifc_root_sq_info_s *sq_info = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_root_info, return); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)(root_info->phba); + + for (q_index = 0; q_index < root_info->sq_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + hifc_slq_free(hba->hw_dev_handle, sq_info->sq_handle); + sq_info->sq_handle = NULL; + sq_info->cla_addr = 0; + } +} + +irqreturn_t hifc_root_sq_irq(int v_irq, void *v_sq_info) +{ + struct hifc_root_sq_info_s *sq_info = NULL; + unsigned short cur_ci = 0; + static unsigned int enter_num; + + enter_num++; + sq_info = (struct hifc_root_sq_info_s *)v_sq_info; + + cur_ci = *sq_info->ci_addr; + cur_ci = be16_to_cpu(cur_ci); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[event]Root SQ Irq Enter Num is %u, Root SQ Ci is %u", + enter_num, cur_ci); + HIFC_REFERNCE_VAR(enter_num, INVALID_VALUE32, IRQ_HANDLED) + HIFC_REFERNCE_VAR(cur_ci, INVALID_VALUE16, IRQ_HANDLED) + + return IRQ_HANDLED; +} + +/* + * hifc_alloc_root_sq_int - Allocate interrupt resources in Root SQ, and + * register callback function. 
+ * @v_root_info: root sq struct info + * @Return: 0 - success, negative - failure + */ +static unsigned int hifc_alloc_root_sq_int(struct hifc_root_info_s *v_root_info) +{ + int ret = UNF_RETURN_ERROR_S32; + unsigned int q_index = 0; + unsigned int cfg_num = 0; + unsigned short act_num = 0; + struct irq_info irq_info; + struct hifc_root_sq_info_s *sq_info = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_hba_s *hba = NULL; + + root_info = v_root_info; + hba = (struct hifc_hba_s *)(root_info->phba); + + for (q_index = 0; q_index < root_info->sq_num; q_index++) { + ret = hifc_alloc_irqs(hba->hw_dev_handle, SERVICE_T_FC, + HIFC_INT_NUM_PER_QUEUE, &irq_info, + &act_num); + if ((ret != RETURN_OK) || + (act_num != HIFC_INT_NUM_PER_QUEUE)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[err]cfg_alloc_irqs Root SQ irq failed, SQ Index = 0x%x, return 0x%x", + q_index, ret); + + goto free_irq; + } + + if (irq_info.msix_entry_idx >= HIFC_ROOT_Q_INT_ID_MAX) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[err]cfg_alloc_irqs Root SQ irq id exceed 1024, msix_entry_idx 0x%x", + irq_info.msix_entry_idx); + + hifc_free_irq(hba->hw_dev_handle, SERVICE_T_FC, + irq_info.irq_id); + goto free_irq; + } + + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + sq_info->irq_id = (unsigned int)(irq_info.irq_id); + sq_info->msix_entry_idx = (unsigned short) + (irq_info.msix_entry_idx); + + ret = snprintf(sq_info->irq_name, HIFC_IRQ_NAME_MAX - 1, + "Root SQ 0x%x", q_index); + UNF_FUNCTION_RETURN_CHECK(ret, HIFC_IRQ_NAME_MAX - 1); + ret = request_irq(sq_info->irq_id, hifc_root_sq_irq, 0UL, + sq_info->irq_name, sq_info); + hifc_set_msix_state(hba->hw_dev_handle, sq_info->msix_entry_idx, + HIFC_MSIX_ENABLE); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[err]UNF_OS_REQUEST_IRQ Root SQ irq failed, SQ Index = 0x%x, return 0x%x", + q_index, ret); + + hifc_free_irq(hba->hw_dev_handle, SERVICE_T_FC, + sq_info->irq_id); + sq_info->irq_id = 0; + sq_info->msix_entry_idx = 0; + goto free_irq; + } + } + + return RETURN_OK; + +free_irq: + cfg_num = q_index; + + for (q_index = 0; q_index < cfg_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + + free_irq(sq_info->irq_id, sq_info); + hifc_free_irq(hba->hw_dev_handle, SERVICE_T_FC, + sq_info->irq_id); + sq_info->irq_id = 0; + sq_info->msix_entry_idx = 0; + } + + return UNF_RETURN_ERROR; +} + +static void hifc_free_root_sq_int(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + struct hifc_root_sq_info_s *sq_info = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_root_info, return); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)(root_info->phba); + + for (q_index = 0; q_index < root_info->sq_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + hifc_set_msix_state(hba->hw_dev_handle, sq_info->msix_entry_idx, + HIFC_MSIX_DISABLE); + free_irq(sq_info->irq_id, sq_info); + hifc_free_irq(hba->hw_dev_handle, SERVICE_T_FC, + sq_info->irq_id); + sq_info->irq_id = 0; + sq_info->msix_entry_idx = 0; + } +} + +/* + * hifc_cfg_root_sq_ci_tbl - Configure CI address in SQ and interrupt number. 
+ * @v_root_info: root queue info + * @Return: 0 - success, negative - failure + */ +static unsigned int hifc_cfg_root_sq_ci_tbl( + struct hifc_root_info_s *v_root_info) +{ + int ret = 0; + unsigned int queue_index = 0; + dma_addr_t ci_dma_addr = 0; + struct hifc_sq_attr sq_ci_attr; + struct hifc_root_sq_info_s *sq_info = NULL; + void *handle = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return UNF_RETURN_ERROR); + + handle = ((struct hifc_hba_s *)v_root_info->phba)->hw_dev_handle; + + for (queue_index = 0; queue_index < v_root_info->sq_num; + queue_index++) { + /* Sync CI addr to hw, cfg attribute table format */ + memset(&sq_ci_attr, 0, sizeof(struct hifc_sq_attr)); + sq_info = (struct hifc_root_sq_info_s *)v_root_info->sq_info + + queue_index; + + sq_ci_attr.dma_attr_off = 0; + sq_ci_attr.pending_limit = 0; + sq_ci_attr.coalescing_time = 0; + sq_ci_attr.intr_en = HIFC_INT_ENABLE; + sq_ci_attr.intr_idx = sq_info->msix_entry_idx; + sq_ci_attr.l2nic_sqn = queue_index; + ci_dma_addr = HIFC_GET_ROOT_SQ_CI_ADDR(sq_info->ci_dma_addr, + queue_index); + sq_ci_attr.ci_dma_base = ci_dma_addr >> + HIFC_ROOT_SQ_CI_ATTRIBUTE_ADDRESS_SHIFT; + + /* Little endian used in UP */ + ret = hifc_set_ci_table(handle, sq_info->qid, &sq_ci_attr); + if (ret != 0) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[err]hifc_set_ci_table failed, return %d", + ret); + + return UNF_RETURN_ERROR; + } + } + + return RETURN_OK; +} + +/** + * hifc_alloc_root_sq_db - Allocate Doorbell buffer in root SQ + * @v_root_info: root queue struct info + * @Return: 0 - success, negative - failure + */ +static unsigned int hifc_alloc_root_sq_db(struct hifc_root_info_s *v_root_info) +{ + int ret = UNF_RETURN_ERROR_S32; + unsigned int q_index = 0; + unsigned int cfg_num = 0; + struct hifc_root_sq_info_s *sq_info = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return UNF_RETURN_ERROR); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)(root_info->phba); + + for (q_index = 0; q_index < root_info->sq_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + + ret = hifc_alloc_db_addr(hba->hw_dev_handle, + &sq_info->normal_db.virt_map_addr, + NULL); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[err]Allocate Root SQ DB address failed, SQ Index = %u, return %d", + q_index, ret); + + goto free_buff; + } + + if (!sq_info->normal_db.virt_map_addr) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[err]virt_map_addr is invalid, SQ Index = %u", + q_index); + + goto free_buff; + } + } + + return RETURN_OK; + +free_buff: + cfg_num = q_index; + + for (q_index = 0; q_index < cfg_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + + hifc_free_db_addr(hba->hw_dev_handle, + sq_info->normal_db.virt_map_addr, NULL); + sq_info->normal_db.virt_map_addr = NULL; + sq_info->normal_db.phy_addr = 0; + } + + return UNF_RETURN_ERROR; +} + +static void hifc_afree_root_sq_db(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + struct hifc_root_sq_info_s *sq_info = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_root_info, return); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)(root_info->phba); + + for (q_index = 0; q_index < root_info->sq_num; 
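[Editor's note: a pattern that recurs throughout these allocation helpers (hifc_alloc_root_sq_db above, and the interrupt and buffer allocators before it) is "allocate one resource per queue; on the first failure, roll back only the entries already set up". A minimal sketch of the idiom with a hypothetical demo_alloc/demo_free pair.]

#include <linux/errno.h>

static int demo_alloc(unsigned int i, void **res);	/* hypothetical */
static void demo_free(void *res);			/* hypothetical */

static int demo_alloc_all(void **res, unsigned int num)
{
	unsigned int i, done;

	for (i = 0; i < num; i++) {
		if (demo_alloc(i, &res[i]))
			goto unwind;
	}
	return 0;

unwind:
	done = i;			/* entries 0..done-1 were allocated */
	for (i = 0; i < done; i++) {
		demo_free(res[i]);
		res[i] = NULL;
	}
	return -ENOMEM;
}
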
q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + + hifc_free_db_addr(hba->hw_dev_handle, + sq_info->normal_db.virt_map_addr, NULL); + sq_info->normal_db.virt_map_addr = NULL; + + sq_info->normal_db.phy_addr = 0; + } +} + +static void hifc_assemble_root_sq_ctx(unsigned int cmd_sq_num, + struct hifc_root_sq_info_s *v_sq_info, + void *v_buf) +{ + unsigned int q_index = 0; + unsigned long long ci_init_addr = 0; + struct hifc_root_sq_info_s *sq_info = NULL; + + struct hifc_qp_ctxt_header *cmdq_header = NULL; + struct hifc_sq_ctxt *sq_ctx = NULL; + struct hifc_sq_ctxt_block *sq_ctx_block = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_sq_info, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_buf, return); + + sq_info = v_sq_info; + sq_ctx_block = (struct hifc_sq_ctxt_block *)v_buf; + cmdq_header = &sq_ctx_block->cmdq_hdr; + + /* CMD header initialization */ + cmdq_header->num_queues = (unsigned short)cmd_sq_num; + cmdq_header->queue_type = HIFC_CMDQ_QUEUE_TYPE_SQ; + cmdq_header->addr_offset = HIFC_ROOT_SQ_CTX_OFFSET(sq_info->max_qnum, + sq_info->qid); + + /* CMD Header convert to big endian */ + hifc_cpu_to_big32(cmdq_header, sizeof(struct hifc_qp_ctxt_header)); + + for (q_index = 0; q_index < cmd_sq_num; q_index++) { + sq_info = v_sq_info + q_index; + sq_ctx = &sq_ctx_block->sq_ctx[q_index]; + memset(sq_ctx, 0, sizeof(struct hifc_sq_ctxt)); + + sq_ctx->sq_ctx_dw0.global_sq_id = sq_info->global_qpn; + sq_ctx->sq_ctx_dw0.ceq_num = 0; + sq_ctx->sq_ctx_dw1.owner = HIFC_ROOT_SQ_LOOP_OWNER; + + ci_init_addr = hifc_slq_get_first_pageaddr(sq_info->sq_handle); + + sq_ctx->sq_ctx_dw2.ci_wqe_page_addr_hi = + HIFC_CI_WQE_PAGE_HIGH_ADDR(ci_init_addr); + sq_ctx->ci_wqe_page_addr_lo = + HIFC_CI_WQE_PAGE_LOW_ADDR(ci_init_addr); + sq_ctx->sq_ctx_dw4.prefetch_min = + HIFC_ROOT_CTX_WQE_PREFETCH_MIN; + sq_ctx->sq_ctx_dw4.prefetch_max = + HIFC_ROOT_CTX_WQE_PREFETCH_MAX; + sq_ctx->sq_ctx_dw4.prefetch_cache_threshold = + HIFC_ROOT_CTX_WQE_PRERETCH_THRESHOLD; + sq_ctx->sq_ctx_dw5.prefetch_owner = HIFC_ROOT_SQ_LOOP_OWNER; + sq_ctx->sq_ctx_dw6.prefetch_ci_wqe_addr_hi = + HIFC_CI_WQE_PAGE_HIGH_ADDR(ci_init_addr); + sq_ctx->prefetch_ci_wqe_addr_lo = + HIFC_CI_WQE_PAGE_LOW_ADDR(ci_init_addr); + sq_ctx->sq_ctx_dw10.cla_addr_hi = + HIFC_CLA_HIGH_ADDR(sq_info->cla_addr); + sq_ctx->cla_addr_lo = HIFC_CLA_LOW_ADDR(sq_info->cla_addr); + + /* big-little endian convert */ + hifc_cpu_to_big32(sq_ctx, sizeof(struct hifc_sq_ctxt)); + } +} + +static unsigned int hifc_cfg_root_sq_ctx(unsigned int cmd_sq_num, + void *v_handle, + struct hifc_cmd_buf *v_chipif_cmd_buff) +{ + int ret = 0; + unsigned short buff_used_size = 0; + unsigned int time_out = 0xF0000000; + unsigned long long uc_return = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_handle, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_chipif_cmd_buff, + return UNF_RETURN_ERROR); + + UNF_REFERNCE_VAR(uc_return); + UNF_REFERNCE_VAR(time_out); + UNF_REFERNCE_VAR(ret); + + buff_used_size = (unsigned short)(sizeof(struct hifc_qp_ctxt_header) + + sizeof(struct hifc_sq_ctxt) * cmd_sq_num); + v_chipif_cmd_buff->size = buff_used_size; + + ret = hifc_cmdq_direct_resp(v_handle, + HIFC_ACK_TYPE_CMDQ, + HIFC_MOD_L2NIC, + HIFC_UCODE_CMD_MODIFY_QUEUE_CONTEXT, + v_chipif_cmd_buff, + (u64 *)&uc_return, + time_out); + if ((ret != RETURN_OK) || (uc_return != RETURN_OK)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]chipif_cmd_to_ucode_imm failed, uiret %d, ullUcRet %llu", + ret, 
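[Editor's note: hifc_assemble_root_sq_ctx above fills the SQ context in host byte order and only then converts it to big endian (hifc_cpu_to_big32) before posting it through the command queue. A sketch of what such a word-wise in-place conversion amounts to, assuming the structure size is a multiple of 4 bytes; demo_cpu_to_big32 is illustrative, not the driver helper.]

#include <linux/types.h>
#include <asm/byteorder.h>

/* convert an array of 32-bit words from CPU to big-endian order in place */
static void demo_cpu_to_big32(void *data, unsigned int size)
{
	__be32 *dst = data;
	u32 *src = data;
	unsigned int i;

	for (i = 0; i < size / sizeof(u32); i++)
		dst[i] = cpu_to_be32(src[i]);
}
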
uc_return); + + return UNF_RETURN_ERROR; + } + + return RETURN_OK; +} + +static unsigned int hifc_calc_cmd_sq_num(unsigned int remain_sq_num) +{ + unsigned int sq_num = 0; + + if (remain_sq_num < HIFC_ROOT_CFG_SQ_NUM_MAX) + sq_num = remain_sq_num; + else + sq_num = HIFC_ROOT_CFG_SQ_NUM_MAX; + + return sq_num; +} + +static unsigned int hifc_init_root_sq_ctx(struct hifc_root_info_s *v_root_info) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int cmd_sq_num = 0; + unsigned int remain_sq_num = 0; + struct hifc_hba_s *hba = NULL; + struct hifc_root_sq_info_s *sq_info = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_cmd_buf *chipif_cmd_buf = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return UNF_RETURN_ERROR); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)root_info->phba; + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info); + + chipif_cmd_buf = hifc_alloc_cmd_buf(hba->hw_dev_handle); + if (!chipif_cmd_buf) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]hifc_alloc_cmd_buf failed."); + + return ENOMEM; + } + + remain_sq_num = root_info->sq_num; + while (remain_sq_num > 0) { + cmd_sq_num = hifc_calc_cmd_sq_num(remain_sq_num); + remain_sq_num -= cmd_sq_num; + + /* Assemble root SQ context */ + hifc_assemble_root_sq_ctx(cmd_sq_num, sq_info, + chipif_cmd_buf->buf); + + /* Send via ucode */ + ret = hifc_cfg_root_sq_ctx(cmd_sq_num, hba->hw_dev_handle, + chipif_cmd_buf); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[err]hifc_cfg_root_sq_ctx failed, return %u", + ret); + break; + } + + sq_info = sq_info + cmd_sq_num; + } + + /* Release cmd buffer */ + hifc_free_cmd_buf(hba->hw_dev_handle, chipif_cmd_buf); + return ret; +} + +static unsigned int hifc_create_root_sqs(struct hifc_root_info_s *v_root_info) +{ + unsigned int ret = UNF_RETURN_ERROR; + /* 1. Allocate sqinfo */ + ret = hifc_alloc_root_sq_info(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]hifc_alloc_root_sq_info failed, return %u", + ret); + + return ret; + } + + /* 2. Initialize sqinfo */ + hifc_init_root_sq_base_info(v_root_info); + + /* 3. Apply SQ CI address */ + ret = hifc_alloc_root_sq_ci_addr(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]hifc_alloc_root_sq_ci_addr failed, return %u", + ret); + + goto free_sq_info; + } + + /* 4. Allocate SQ buffer */ + ret = hifc_alloc_root_sq_buff(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]hifc_alloc_root_sq_buff failed, return %u", + ret); + + goto free_sq_ci_addr; + } + + /* 5. Register SQ(s) interrupt */ + ret = hifc_alloc_root_sq_int(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]hifc_alloc_root_sq_int failed, return %u", + ret); + + goto free_root_sq_buff; + } + + /* 6. Configure CI address in SQ and interrupt number */ + ret = hifc_cfg_root_sq_ci_tbl(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_ERR, + "[warn]hifc_cfg_root_sq_ci_tbl failed, return %u", + ret); + + goto free_root_sq_int; + } + + /* 7. 
Allocate Doorbell buffer */ + ret = hifc_alloc_root_sq_db(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]hifc_alloc_root_sq_db failed, return %u", + ret); + + goto free_root_sq_int; + } + + /* 8. Initialize SQ context */ + ret = hifc_init_root_sq_ctx(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[warn]hifc_init_root_sq_ctx failed, return %u", + ret); + + goto free_db; + } + + return RETURN_OK; + +free_db: + hifc_afree_root_sq_db(v_root_info); + +free_root_sq_int: + hifc_free_root_sq_int(v_root_info); + +free_root_sq_buff: + hifc_free_root_sq_buff(v_root_info); + +free_sq_ci_addr: + hifc_free_root_sq_ci_addr(v_root_info); + +free_sq_info: + hifc_free_root_sq_info(v_root_info); + + return ret; +} + +static void hifc_destroy_root_sqs(struct hifc_root_info_s *v_root_info) +{ + /* Free DB resources */ + hifc_afree_root_sq_db(v_root_info); + + /* Free interrupt resources */ + hifc_free_root_sq_int(v_root_info); + + /* Free WQE buffers */ + hifc_free_root_sq_buff(v_root_info); + + /* Free CI address */ + hifc_free_root_sq_ci_addr(v_root_info); + + /* Free Root SQ struct */ + hifc_free_root_sq_info(v_root_info); +} + +static unsigned int hifc_alloc_root_rq_info( + struct hifc_root_info_s *v_root_info) +{ + unsigned int rq_info_size = 0; + struct hifc_root_rq_info_s *root_rq_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return UNF_RETURN_ERROR); + + rq_info_size = (unsigned int) + (sizeof(struct hifc_root_rq_info_s) * + v_root_info->rq_num); + root_rq_info = (struct hifc_root_rq_info_s *)kmalloc(rq_info_size, + GFP_ATOMIC); + if (!root_rq_info) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Allocate Root RQ(s) failed"); + + return UNF_RETURN_ERROR; + } + memset(root_rq_info, 0, rq_info_size); + + v_root_info->rq_info = root_rq_info; + + return RETURN_OK; +} + +static void hifc_free_root_rq_info(struct hifc_root_info_s *v_root_info) +{ + struct hifc_root_info_s *root_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_root_info, return); + + root_info = v_root_info; + kfree(root_info->rq_info); + root_info->rq_info = NULL; +} + +static void hifc_init_root_rq_basic_info(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + unsigned short global_base_qpn = 0; + unsigned short max_q_num = 0; + struct hifc_root_rq_info_s *rq_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_root_info, return); + + hba = (struct hifc_hba_s *)v_root_info->phba; + global_base_qpn = hifc_get_global_base_qpn(hba->hw_dev_handle); + max_q_num = hifc_func_max_qnum(hba->hw_dev_handle); + + for (q_index = 0; q_index < v_root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(v_root_info->rq_info) + + q_index; + rq_info->max_qnum = max_q_num; + rq_info->qid = (unsigned short)q_index; + rq_info->q_depth = HIFC_ROOT_RQ_DEPTH; + rq_info->wqe_bb_size = HIFC_ROOT_RQ_WQEBB; + rq_info->root_info = v_root_info; + rq_info->global_qpn = global_base_qpn + q_index; + rq_info->owner = HIFC_ROOT_RQ_LOOP_OWNER; + } +} + +static unsigned int hifc_alloc_root_rq_pi_addr( + struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + unsigned int pi_addr_size = 0; + unsigned int pi_addr_offset = 0; + struct hifc_hba_s *hba = NULL; + struct hifc_root_rq_info_s *rq_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return 
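[Editor's note: hifc_init_root_sq_ctx above pushes the SQ contexts down in batches; each command carries at most HIFC_ROOT_CFG_SQ_NUM_MAX contexts and the loop walks the queue array until none remain. The batching skeleton, stripped of the hardware details; DEMO_BATCH_MAX and demo_send_batch are stand-ins, not driver symbols.]

#define DEMO_BATCH_MAX 16	/* illustrative cap per command */

static int demo_send_batch(unsigned int first, unsigned int count);	/* hypothetical */

static int demo_send_all(unsigned int total)
{
	unsigned int first = 0, remain = total, batch;
	int ret = 0;

	while (remain > 0) {
		batch = (remain < DEMO_BATCH_MAX) ? remain : DEMO_BATCH_MAX;

		ret = demo_send_batch(first, batch);
		if (ret)
			break;		/* stop on the first failed command */

		first += batch;
		remain -= batch;
	}
	return ret;
}
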
UNF_RETURN_ERROR); + + pi_addr_size = HIFC_ROOT_RQ_PI_TABLE_STEP_BYTE * v_root_info->rq_num; + hba = (struct hifc_hba_s *)v_root_info->phba; + + v_root_info->virt_rq_pi_table_buff = + dma_alloc_coherent(&hba->pci_dev->dev, pi_addr_size, + &v_root_info->rq_pi_table_dma, + GFP_KERNEL); + if (!v_root_info->virt_rq_pi_table_buff) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Allocate Root RQ PI table failed"); + + return UNF_RETURN_ERROR; + } + memset(v_root_info->virt_rq_pi_table_buff, 0, pi_addr_size); + v_root_info->rq_pi_table_size = pi_addr_size; + + for (q_index = 0; q_index < v_root_info->rq_num; q_index++) { + pi_addr_offset = q_index * HIFC_ROOT_RQ_PI_TABLE_STEP_BYTE; + rq_info = (struct hifc_root_rq_info_s *)(v_root_info->rq_info) + + q_index; + rq_info->pi_vir_addr = + (unsigned short *) + ((unsigned long long)v_root_info->virt_rq_pi_table_buff + + pi_addr_offset); + rq_info->pi_dma_addr = v_root_info->rq_pi_table_dma + + pi_addr_offset; + } + + return RETURN_OK; +} + +static void hifc_free_root_rq_pi_addr(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_hba_s *hba = NULL; + struct hifc_root_rq_info_s *rq_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, return); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)root_info->phba; + dma_free_coherent(&hba->pci_dev->dev, root_info->rq_pi_table_size, + root_info->virt_rq_pi_table_buff, + root_info->rq_pi_table_dma); + root_info->virt_rq_pi_table_buff = NULL; + root_info->rq_pi_table_dma = 0; + + for (q_index = 0; q_index < root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + q_index; + rq_info->pi_vir_addr = NULL; + rq_info->pi_dma_addr = 0; + } +} + +static unsigned int hifc_alloc_root_rq_buff( + struct hifc_root_info_s *v_root_info) +{ + int ret = 0; + unsigned int q_index = 0; + unsigned int back_q_num = 0; + struct hifc_root_rq_info_s *rq_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)(v_root_info->phba); + + for (q_index = 0; q_index < v_root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(v_root_info->rq_info) + + q_index; + + /* Wqe_Base_Size:32; Depth:2048; Page_Size:4096 */ + ret = hifc_slq_alloc(hba->hw_dev_handle, rq_info->wqe_bb_size, + rq_info->q_depth, (u16)PAGE_SIZE, + (u64 *)&rq_info->ci_cla_tbl_addr, + &rq_info->rq_handle); + if ((ret != 0) || (!rq_info->rq_handle) || + (rq_info->ci_cla_tbl_addr == 0)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[warn]slq_allocate Root RQ Buffer failed, RQ Index = %u, return %u", + q_index, ret); + + goto free_rq_buff; + } + } + + return RETURN_OK; + +free_rq_buff: + back_q_num = q_index; + + for (q_index = 0; q_index < back_q_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(v_root_info->rq_info) + + q_index; + hifc_slq_free(hba->hw_dev_handle, rq_info->rq_handle); + rq_info->rq_handle = NULL; + rq_info->ci_cla_tbl_addr = 0; + } + + return UNF_RETURN_ERROR; +} + +static void hifc_free_root_rq_buff(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + struct hifc_root_rq_info_s *rq_info = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_root_info, return); + + root_info = v_root_info; + hba = (struct hifc_hba_s 
*)(root_info->phba); + + for (q_index = 0; q_index < root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + q_index; + hifc_slq_free(hba->hw_dev_handle, rq_info->rq_handle); + rq_info->rq_handle = NULL; + rq_info->ci_cla_tbl_addr = 0; + } +} + +irqreturn_t hifc_root_rq_irq(int v_irq, void *v_rq_info) +{ + HIFC_CHECK(INVALID_VALUE32, NULL != v_rq_info, return IRQ_NONE); + + tasklet_schedule(&((struct hifc_root_rq_info_s *)v_rq_info)->tasklet); + + return IRQ_HANDLED; +} + +static unsigned int hifc_alloc_root_rq_int(struct hifc_root_info_s *v_root_info) +{ + int ret = UNF_RETURN_ERROR_S32; + unsigned int q_index = 0; + unsigned int cfg_num = 0; + unsigned short act_num = 0; + struct irq_info irq_info; + struct hifc_root_rq_info_s *rq_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)(v_root_info->phba); + + for (q_index = 0; q_index < v_root_info->rq_num; q_index++) { + ret = hifc_alloc_irqs(hba->hw_dev_handle, SERVICE_T_FC, + HIFC_INT_NUM_PER_QUEUE, &irq_info, + &act_num); + if ((ret != RETURN_OK) || (act_num != HIFC_INT_NUM_PER_QUEUE)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[warn]cfg_alloc_irqs Root RQ irq failed, RQ Index = %u, return %d", + q_index, ret); + + goto free_irq; + } + + if (irq_info.msix_entry_idx >= HIFC_ROOT_Q_INT_ID_MAX) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[warn]cfg_alloc_irqs Root RQ irq id exceed 1024, msix_entry_idx %u", + irq_info.msix_entry_idx); + + hifc_free_irq(hba->hw_dev_handle, SERVICE_T_FC, + irq_info.irq_id); + goto free_irq; + } + + rq_info = (struct hifc_root_rq_info_s *)(v_root_info->rq_info) + + q_index; + rq_info->irq_id = (unsigned int)(irq_info.irq_id); + rq_info->msix_entry_idx = (unsigned short) + (irq_info.msix_entry_idx); + + ret = snprintf(rq_info->irq_name, HIFC_IRQ_NAME_MAX - 1, + "Root RQ %u", q_index); + UNF_FUNCTION_RETURN_CHECK(ret, HIFC_IRQ_NAME_MAX - 1); + + tasklet_init(&rq_info->tasklet, hifc_process_root_rqe, + (unsigned long)rq_info); + + ret = request_irq(rq_info->irq_id, hifc_root_rq_irq, 0UL, + rq_info->irq_name, rq_info); + hifc_set_msix_state(hba->hw_dev_handle, rq_info->msix_entry_idx, + HIFC_MSIX_ENABLE); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[warn]UNF_OS_REQUEST_IRQ Root RQ irq failed, RQ Index = %u, return %d", + q_index, ret); + + hifc_free_irq(hba->hw_dev_handle, SERVICE_T_FC, + rq_info->irq_id); + memset(rq_info->irq_name, 0, HIFC_IRQ_NAME_MAX); + rq_info->irq_id = 0; + rq_info->msix_entry_idx = 0; + goto free_irq; + } + } + + return RETURN_OK; + +free_irq: + cfg_num = q_index; + + for (q_index = 0; q_index < cfg_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(v_root_info->rq_info) + + q_index; + + free_irq(rq_info->irq_id, rq_info); + tasklet_kill(&rq_info->tasklet); + hifc_free_irq(hba->hw_dev_handle, SERVICE_T_FC, + rq_info->irq_id); + rq_info->irq_id = 0; + rq_info->msix_entry_idx = 0; + } + + return UNF_RETURN_ERROR; +} + +static void hifc_free_root_rq_int(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + struct hifc_root_rq_info_s *rq_info = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_root_info, return); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)(root_info->phba); + + for (q_index = 0; q_index < 
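[Editor's note: the root RQ interrupt path above is the classic top-half/bottom-half split: the hard IRQ handler only schedules a tasklet, and the tasklet drains the receive queue in softirq context. A minimal sketch with hypothetical names (demo_rq, demo_rq_drain, demo_rq_isr).]

#include <linux/interrupt.h>

struct demo_rq {
	struct tasklet_struct tasklet;
	unsigned int irq_id;
};

static void demo_rq_drain(unsigned long data)
{
	struct demo_rq *rq = (struct demo_rq *)data;

	/* walk completed RQEs here, outside hard-IRQ context */
	(void)rq;
}

static irqreturn_t demo_rq_isr(int irq, void *dev_id)
{
	struct demo_rq *rq = dev_id;

	tasklet_schedule(&rq->tasklet);	/* defer the real work */
	return IRQ_HANDLED;
}

static int demo_rq_setup(struct demo_rq *rq)
{
	tasklet_init(&rq->tasklet, demo_rq_drain, (unsigned long)rq);
	return request_irq(rq->irq_id, demo_rq_isr, 0, "demo_rq", rq);
}
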
root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + q_index; + hifc_set_msix_state(hba->hw_dev_handle, rq_info->msix_entry_idx, + HIFC_MSIX_DISABLE); + free_irq(rq_info->irq_id, rq_info); + tasklet_kill(&rq_info->tasklet); + hifc_free_irq(hba->hw_dev_handle, SERVICE_T_FC, + rq_info->irq_id); + rq_info->irq_id = 0; + rq_info->msix_entry_idx = 0; + } +} + +static unsigned int hifc_alloc_root_rq_completion_buff( + struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + unsigned int back_index = 0; + unsigned int rqc_buff_size = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_root_rq_info_s *rq_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return UNF_RETURN_ERROR); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)(root_info->phba); + + for (q_index = 0; q_index < root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + q_index; + + /* 2048 * Size */ + rqc_buff_size = rq_info->q_depth * + sizeof(struct hifc_root_rq_complet_info_s); + rq_info->rq_completion_buff = dma_alloc_coherent( + &hba->pci_dev->dev, + rqc_buff_size, + &rq_info->rq_completion_dma, + GFP_KERNEL); + if (!rq_info->rq_completion_buff) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[warn]Allocate Root RQ completion buffer failed, RQ Index = %u.", + q_index); + + goto free_buff; + } + memset(rq_info->rq_completion_buff, 0, rqc_buff_size); + rq_info->rqc_buff_size = rqc_buff_size; + } + + return RETURN_OK; + +free_buff: + + back_index = q_index; + + for (q_index = 0; q_index < back_index; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + q_index; + dma_free_coherent(&hba->pci_dev->dev, rq_info->rqc_buff_size, + rq_info->rq_completion_buff, + rq_info->rq_completion_dma); + rq_info->rq_completion_buff = NULL; + rq_info->rq_completion_dma = 0; + rq_info->rqc_buff_size = 0; + } + + return UNF_RETURN_ERROR; +} + +static void hifc_free_root_rq_completion_buff( + struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_root_rq_info_s *rq_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, return); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)(root_info->phba); + + for (q_index = 0; q_index < root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + q_index; + dma_free_coherent(&hba->pci_dev->dev, rq_info->rqc_buff_size, + rq_info->rq_completion_buff, + rq_info->rq_completion_dma); + rq_info->rq_completion_buff = NULL; + rq_info->rq_completion_dma = 0; + rq_info->rqc_buff_size = 0; + } +} + +static unsigned int hifc_alloc_root_rq_rcv_buff( + struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + unsigned int back_index = 0; + unsigned int rq_rcv_buff_size = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_root_rq_info_s *rq_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return UNF_RETURN_ERROR); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)(root_info->phba); + + for (q_index = 0; q_index < root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + q_index; + + /* Depth(2048) * Buff_Size(2048) */ + rq_rcv_buff_size = rq_info->q_depth * + HIFC_ROOT_RQ_RECV_BUFF_SIZE; + 
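+		/* A single coherent allocation backs all receive buffers of
+		 * this RQ; hifc_init_root_rq_wqe() later points each WQE at
+		 * its own HIFC_ROOT_RQ_RECV_BUFF_SIZE slice of this area.
+		 */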
rq_info->rq_rcv_buff = dma_alloc_coherent(&hba->pci_dev->dev, + rq_rcv_buff_size, + &rq_info->rq_rcv_dma, + GFP_KERNEL); + if (!rq_info->rq_rcv_buff) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[warn]Allocate Root RQ receive buffer failed, RQ index = %u", + q_index); + + goto free_buff; + } + memset(rq_info->rq_rcv_buff, 0, rq_rcv_buff_size); + rq_info->rq_rcv_buff_size = rq_rcv_buff_size; + } + + return RETURN_OK; + +free_buff: + + back_index = q_index; + + for (q_index = 0; q_index < back_index; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + q_index; + dma_free_coherent(&hba->pci_dev->dev, + rq_info->rq_rcv_buff_size, + rq_info->rq_rcv_buff, rq_info->rq_rcv_dma); + rq_info->rq_rcv_buff = NULL; + rq_info->rq_rcv_dma = 0; + rq_info->rq_rcv_buff_size = 0; + } + + return UNF_RETURN_ERROR; +} + +static void hifc_free_root_rq_rcv_buff(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_root_rq_info_s *rq_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_root_info, return); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)(root_info->phba); + + for (q_index = 0; q_index < root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + q_index; + dma_free_coherent(&hba->pci_dev->dev, + rq_info->rq_rcv_buff_size, + rq_info->rq_rcv_buff, rq_info->rq_rcv_dma); + rq_info->rq_rcv_buff = NULL; + rq_info->rq_rcv_dma = 0; + rq_info->rq_rcv_buff_size = 0; + } +} + +static void hifc_init_root_rq_wqe(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + unsigned short wqe_index = 0; + unsigned int dma_offset = 0; + dma_addr_t rq_completion_dma = 0; + dma_addr_t rq_rcv_dma = 0; + struct nic_rq_wqe *rq_wqe = NULL; + struct nic_wqe_ctrl_sec *wqe_ctrl = NULL; + struct nic_rq_sge_sec *buff_sge = NULL; + struct nic_rq_bd_sec *rq_buff_bd = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_root_rq_info_s *rq_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, return); + + root_info = v_root_info; + + for (q_index = 0; q_index < root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + q_index; + + for (wqe_index = 0; wqe_index < rq_info->q_depth; wqe_index++) { + rq_wqe = (struct nic_rq_wqe *) + hifc_slq_get_addr(rq_info->rq_handle, + wqe_index); + if (!rq_wqe) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_REG_ATT, UNF_ERR, "[err]Get Rq Wqe failed"); + + return; + } + memset(rq_wqe, 0, sizeof(struct nic_rq_wqe)); + + /* Initialize ctrl section */ + wqe_ctrl = &rq_wqe->rq_wqe_ctrl_sec; + wqe_ctrl->bs.owner = HIFC_ROOT_RQ_LOOP_OWNER; + /* control section = 8 bytes */ + wqe_ctrl->bs.ctrl_sec_len = 1; + /* complete section = 16B for SGE */ + wqe_ctrl->bs.completion_sec_len = 2; + /* bd section = 8B */ + wqe_ctrl->bs.buf_desc_sec_len = 1; + wqe_ctrl->bs.cf = 1; /* use SGE */ + + /* Fill wqe receive information section */ + buff_sge = &rq_wqe->rx_sge; + dma_offset = sizeof(struct hifc_root_rq_complet_info_s) + * wqe_index; + rq_completion_dma = rq_info->rq_completion_dma + + dma_offset; + buff_sge->wb_addr_low = + HIFC_LOW_32_BITS(rq_completion_dma); + buff_sge->wb_addr_high = + HIFC_HIGH_32_BITS(rq_completion_dma); + buff_sge->bs0.length = + sizeof(struct hifc_root_rq_complet_info_s); + + /* Fill db */ + rq_buff_bd = &rq_wqe->pkt_buf_addr; + dma_offset = HIFC_ROOT_RQ_RECV_BUFF_SIZE * wqe_index; + 
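+			/* The BD section references this WQE's private slice
+			 * of the shared receive buffer: base DMA address plus
+			 * wqe_index * HIFC_ROOT_RQ_RECV_BUFF_SIZE.
+			 */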
rq_rcv_dma = rq_info->rq_rcv_dma + dma_offset; + rq_buff_bd->pkt_buf_addr_high = + HIFC_HIGH_32_BITS(rq_rcv_dma); + rq_buff_bd->pkt_buf_addr_low = + HIFC_LOW_32_BITS(rq_rcv_dma); + + /* big-little endian convert */ + hifc_cpu_to_big32((void *)rq_wqe, + sizeof(struct nic_rq_wqe)); + } + + rq_info->pi = rq_info->q_depth - 1; + rq_info->owner = HIFC_ROOT_RQ_LOOP_OWNER; + } +} + +static unsigned int hifc_calc_cmd_rq_num(unsigned int remain_rq_num) +{ + unsigned int ret = 0; + + if (remain_rq_num < HIFC_ROOT_CFG_RQ_NUM_MAX) + ret = remain_rq_num; + else + ret = HIFC_ROOT_CFG_RQ_NUM_MAX; + + return ret; +} + +static void hifc_assemble_root_rq_ctx(unsigned int cmd_rq_num, + struct hifc_root_rq_info_s *v_rq_info, + void *v_buf) +{ + unsigned int q_index = 0; + unsigned long long ci_init_addr = 0; + struct hifc_root_rq_info_s *rq_info = NULL; + struct hifc_qp_ctxt_header *cmdq_header = NULL; + struct hifc_rq_ctxt *rq_ctx = NULL; + struct hifc_rq_ctxt_block *rq_ctx_block = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_rq_info, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_buf, return); + + rq_info = v_rq_info; + rq_ctx_block = (struct hifc_rq_ctxt_block *)v_buf; + cmdq_header = &rq_ctx_block->cmdq_hdr; + + /* cmdheader initialization */ + cmdq_header->num_queues = (unsigned short)cmd_rq_num; + cmdq_header->queue_type = HIFC_CMDQ_QUEUE_TYPE_RQ; + cmdq_header->addr_offset = HIFC_ROOT_RQ_CTX_OFFSET(rq_info->max_qnum, + rq_info->qid); + + /* big-little endian convert */ + hifc_cpu_to_big32(cmdq_header, sizeof(struct hifc_qp_ctxt_header)); + + for (q_index = 0; q_index < cmd_rq_num; q_index++) { + rq_info = v_rq_info + q_index; + rq_ctx = &rq_ctx_block->rq_ctx[q_index]; + memset(rq_ctx, 0, sizeof(struct hifc_rq_ctxt)); + + rq_ctx->pi_gpa_hi = HIFC_HIGH_32_BITS(rq_info->pi_dma_addr); + rq_ctx->pi_gpa_lo = HIFC_LOW_32_BITS(rq_info->pi_dma_addr); + rq_ctx->bs2.ci = 0; + rq_ctx->bs0.pi = 0; + + rq_ctx->bs6.ci_cla_tbl_addr_hi = + HIFC_CLA_HIGH_ADDR(rq_info->ci_cla_tbl_addr); + rq_ctx->ci_cla_tbl_addr_lo = + HIFC_CLA_LOW_ADDR(rq_info->ci_cla_tbl_addr); + + ci_init_addr = hifc_slq_get_first_pageaddr(rq_info->rq_handle); + rq_ctx->bs2.ci_wqe_page_addr_hi = + HIFC_CI_WQE_PAGE_HIGH_ADDR(ci_init_addr); + rq_ctx->ci_wqe_page_addr_lo = + HIFC_CI_WQE_PAGE_LOW_ADDR(ci_init_addr); + + rq_ctx->bs.ceq_en = 0; + rq_ctx->bs.owner = HIFC_ROOT_RQ_LOOP_OWNER; + rq_ctx->bs0.int_num = rq_info->msix_entry_idx; + + rq_ctx->bs3.prefetch_cache_threshold = + HIFC_ROOT_CTX_WQE_PRERETCH_THRESHOLD; + rq_ctx->bs3.prefetch_max = HIFC_ROOT_CTX_WQE_PREFETCH_MAX; + rq_ctx->bs3.prefetch_min = HIFC_ROOT_CTX_WQE_PREFETCH_MIN; + rq_ctx->bs5.prefetch_ci_wqe_page_addr_hi = + rq_ctx->bs2.ci_wqe_page_addr_hi; + rq_ctx->prefetch_ci_wqe_page_addr_lo = + rq_ctx->ci_wqe_page_addr_lo; + + /* big-little endian convert */ + hifc_cpu_to_big32(rq_ctx, sizeof(struct hifc_rq_ctxt)); + } +} + +static unsigned int hifc_cfg_root_rq_ctx(unsigned int cmd_rq_num, + void *v_handle, + struct hifc_cmd_buf *v_chipif_cmd_buff) +{ + int ret = 0; + unsigned short buff_used_size = 0; + unsigned int time_out = 0xF0000000; + unsigned long long uc_return = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_handle, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_chipif_cmd_buff, + return UNF_RETURN_ERROR); + + UNF_REFERNCE_VAR(uc_return); + UNF_REFERNCE_VAR(time_out); + UNF_REFERNCE_VAR(ret); + + buff_used_size = (unsigned short)(sizeof(struct hifc_qp_ctxt_header) + + sizeof(struct hifc_rq_ctxt) * cmd_rq_num); + 
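+	/* The cmd buffer carries one hifc_qp_ctxt_header followed by
+	 * cmd_rq_num RQ contexts, all already converted to big endian by
+	 * hifc_assemble_root_rq_ctx().
+	 */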
v_chipif_cmd_buff->size = buff_used_size; + + ret = hifc_cmdq_direct_resp(v_handle, + HIFC_ACK_TYPE_CMDQ, + HIFC_MOD_L2NIC, + HIFC_UCODE_CMD_MODIFY_QUEUE_CONTEXT, + v_chipif_cmd_buff, + (u64 *)&uc_return, + time_out); + if ((ret != RETURN_OK) || (uc_return != RETURN_OK)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]hifc_cmdq_direct_resp failed, uiret %d, ullUcRet %llu", + ret, uc_return); + + return UNF_RETURN_ERROR; + } + + return RETURN_OK; +} + +static unsigned int hifc_init_root_rq_ctx( + void *v_handle, + struct hifc_root_info_s *v_root_info) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int cmd_rq_num = 0; + unsigned int remain_rq_num = 0; + struct hifc_root_rq_info_s *rq_info = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_cmd_buf *chipif_cmd_buf = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_handle, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return UNF_RETURN_ERROR); + + root_info = v_root_info; + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info); + + chipif_cmd_buf = hifc_alloc_cmd_buf(v_handle); + if (!chipif_cmd_buf) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]hifc_alloc_cmd_buf failed"); + + return ENOMEM; + } + + remain_rq_num = root_info->rq_num; + while (remain_rq_num > 0) { + cmd_rq_num = hifc_calc_cmd_rq_num(remain_rq_num); + remain_rq_num -= cmd_rq_num; + + /* Assemble cmd buffer context */ + hifc_assemble_root_rq_ctx(cmd_rq_num, rq_info, + chipif_cmd_buf->buf); + + /* Send via ucode */ + ret = hifc_cfg_root_rq_ctx(cmd_rq_num, v_handle, + chipif_cmd_buf); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[err]hifc_cfg_root_rq_ctx failed, return %u", + ret); + break; + } + + rq_info = rq_info + cmd_rq_num; + } + + /* Free cmd buffer */ + hifc_free_cmd_buf(v_handle, chipif_cmd_buf); + + return ret; +} + +static void hifc_update_root_rq_pi(struct hifc_root_info_s *v_root_info) +{ + unsigned int q_index = 0; + struct hifc_root_rq_info_s *rq_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_root_info, return); + + for (q_index = 0; q_index < v_root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(v_root_info->rq_info) + + q_index; + + wmb(); + *rq_info->pi_vir_addr = cpu_to_be16(rq_info->pi); + } +} + +static unsigned int hifc_create_root_rqs(struct hifc_root_info_s *v_root_info) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)v_root_info->phba; + + /* Allocate RQ struct */ + ret = hifc_alloc_root_rq_info(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]hifc_alloc_root_rq_info failed"); + + return ret; + } + + /* Initialize RQ basic information */ + hifc_init_root_rq_basic_info(v_root_info); + + /* Apply RQ(s) PI GPA */ + ret = hifc_alloc_root_rq_pi_addr(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]hifc_alloc_root_rq_pi_addr failed, return %u", + ret); + + goto free_root_rq_info; + } + + /* Apply RQ's buffer */ + ret = hifc_alloc_root_rq_buff(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]hifc_alloc_root_rq_buff failed, return %u", + ret); + + goto free_rq_pi_addr; + } + + /* Apply completion buffer */ + ret = 
hifc_alloc_root_rq_completion_buff(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]hifc_alloc_root_rq_completion_buff failed, return %u", + ret); + + goto free_root_rq_buff; + } + + /* Allocate root RQ receiving buffer */ + ret = hifc_alloc_root_rq_rcv_buff(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]hifc_alloc_root_rq_rcv_buff failed, return %u", + ret); + + goto free_root_rq_completion_buff; + } + + /* Initialize RQ WQE struct */ + hifc_init_root_rq_wqe(v_root_info); + + /* Apply RQ's interrupt resources */ + ret = hifc_alloc_root_rq_int(v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]hifc_alloc_root_rq_int failed, return %u", + ret); + + goto free_root_rq_receive_buff; + } + + /* Initialize RQ context */ + ret = hifc_init_root_rq_ctx(hba->hw_dev_handle, v_root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]hifc_init_root_rq_ctx Failed, return %u", ret); + + goto free_root_rq_int; + } + + /* Update SQ PI */ + hifc_update_root_rq_pi(v_root_info); + return RETURN_OK; + +free_root_rq_int: + hifc_free_root_rq_int(v_root_info); + +free_root_rq_receive_buff: + hifc_free_root_rq_rcv_buff(v_root_info); + +free_root_rq_completion_buff: + hifc_free_root_rq_completion_buff(v_root_info); + +free_root_rq_buff: + hifc_free_root_rq_buff(v_root_info); + +free_rq_pi_addr: + hifc_free_root_rq_pi_addr(v_root_info); + +free_root_rq_info: + hifc_free_root_rq_info(v_root_info); + + return ret; +} + +static void hifc_destroy_root_rqs(struct hifc_root_info_s *v_root_info) +{ + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, return); + + hifc_free_root_rq_rcv_buff(v_root_info); + + hifc_free_root_rq_completion_buff(v_root_info); + + hifc_free_root_rq_int(v_root_info); + + hifc_free_root_rq_buff(v_root_info); + + hifc_free_root_rq_pi_addr(v_root_info); + + hifc_free_root_rq_info(v_root_info); +} + +static unsigned int hifc_cfg_root_ctx(struct hifc_root_info_s *v_root_info) +{ + int ret; + struct hifc_hba_s *hba = NULL; + struct hifc_root_info_s *root_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_root_info, + return UNF_RETURN_ERROR); + + root_info = v_root_info; + hba = (struct hifc_hba_s *)root_info->phba; + + ret = hifc_set_root_ctxt(hba->hw_dev_handle, HIFC_ROOT_RQ_DEPTH, + HIFC_ROOT_SQ_DEPTH, + HIFC_ROOT_RQ_RECV_BUFF_SIZE); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]chipif_func_vat_info_set failed, return [%d]", + ret); + + return UNF_RETURN_ERROR; + } + + return RETURN_OK; +} + +static void hifc_init_root_basic_info(struct hifc_hba_s *v_hba) +{ + struct hifc_root_info_s *root_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_hba, return); + + root_info = &v_hba->root_info; + memset(root_info, 0, sizeof(struct hifc_root_info_s)); + + root_info->phba = (void *)v_hba; + + root_info->rq_num = HIFC_ROOT_RQ_NUM; + root_info->sq_num = HIFC_ROOT_SQ_NUM; +} + +unsigned int hifc_create_root_queues(void *v_hba) +{ + unsigned int ret = UNF_RETURN_ERROR; + int slq_ret = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, + return UNF_RETURN_ERROR); + + /* Initialize basic root information */ + hba = (struct hifc_hba_s *)v_hba; + hifc_init_root_basic_info(hba); + + root_info = &hba->root_info; + 
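+	/* Bring-up order: slq backing store first, then root SQs, then root
+	 * RQs, and finally the root context; each failure path below unwinds
+	 * the steps that already completed.
+	 */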
+ /* slq Init */ + slq_ret = hifc_slq_init(hba->hw_dev_handle, + (int)(root_info->sq_num + root_info->rq_num)); + if (slq_ret) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]hifc_slq_init init failed, ret:0x%x", slq_ret); + + return UNF_RETURN_ERROR; + } + + /* Create SQ, and send cmdq to ucode for initialization of SQ context */ + ret = hifc_create_root_sqs(root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]hifc_create_root_sqs failed, return [%u]", + ret); + + hifc_slq_uninit(hba->hw_dev_handle); + return ret; + } + + /* Create RQ */ + ret = hifc_create_root_rqs(root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]hifc_create_root_rqs failed, return [%u]", + ret); + + hifc_destroy_root_sqs(root_info); + hifc_slq_uninit(hba->hw_dev_handle); + return ret; + } + + /* Configure root context */ + ret = hifc_cfg_root_ctx(root_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]hifc_cfg_root_ctx failed, return [%u]", ret); + + hifc_destroy_root_rqs(root_info); + hifc_destroy_root_sqs(root_info); + hifc_slq_uninit(hba->hw_dev_handle); + return ret; + } + + return RETURN_OK; +} + +void hifc_destroy_root_queues(void *v_hba) +{ + struct hifc_hba_s *hba = NULL; + struct hifc_root_info_s *root_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return); + + hba = (struct hifc_hba_s *)v_hba; + root_info = &hba->root_info; + + hifc_destroy_root_rqs(root_info); + hifc_destroy_root_sqs(root_info); + + hifc_slq_uninit(hba->hw_dev_handle); +} + +static void hifc_ring_root_sq_db(struct hifc_hba_s *v_hba, + struct hifc_root_sq_info_s *v_sq_info) +{ + struct nic_tx_doorbell db; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_sq_info, return); + + memset(&db, 0, sizeof(struct nic_tx_doorbell)); + + db.bs0.srv_type = HIFC_DOORBELL_SQ_TYPE; + db.bs0.queue_id = v_sq_info->qid; + db.bs0.pi_high = v_sq_info->pi >> HIFC_DOORBELL_SQ_PI_HIGH_BITS_SHIFT; + db.bs0.cos = 0; + + db.dw0 = cpu_to_be32(db.dw0); + wmb(); + + *((unsigned long long *)(v_sq_info->normal_db.virt_map_addr) + + (v_sq_info->pi & HIFC_DOORBELL_SQ_PI_LOW_BITS_MASK)) = + *(unsigned long long *)&db; +} + +static int hifc_root_sq_is_empty(struct hifc_root_sq_info_s *v_sq_info) +{ + unsigned short cur_pi = 0; + unsigned short cur_ci = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_sq_info, return UNF_TRUE); + + /* pi == ci empty, pi-ci = 1 full */ + cur_pi = v_sq_info->pi; + cur_ci = *v_sq_info->ci_addr; + cur_ci = be16_to_cpu(cur_ci); + + if (cur_pi == cur_ci) + return UNF_TRUE; + + return UNF_FALSE; +} + +static int hifc_root_sq_is_full(struct hifc_root_sq_info_s *v_sq_info) +{ + unsigned short cur_pi = 0; + unsigned short cur_ci = 0; + unsigned short valid_wqe_num = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_sq_info, return UNF_TRUE); + + /* pi == ci empty, pi-ci = 1 full */ + cur_pi = v_sq_info->pi; + cur_ci = *v_sq_info->ci_addr; + cur_ci = be16_to_cpu(cur_ci); + valid_wqe_num = v_sq_info->q_depth - 1; + + if ((valid_wqe_num == cur_pi - cur_ci) || + (valid_wqe_num == v_sq_info->q_depth + cur_pi - cur_ci)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Root SQ[%u] is full, PI %u, CI %u", + v_sq_info->global_qpn, cur_pi, cur_ci); + return UNF_TRUE; + } + + return UNF_FALSE; +} + +static void hifc_build_root_wqe_qsf(void *v_qsf) +{ + struct hifc_root_qsf_s *root_qsf = NULL; + + root_qsf = (struct 
hifc_root_qsf_s *)v_qsf; + + /* route to ucode */ + /* MSS range 0x50~0x3E00 */ + root_qsf->route_to_ucode = 1; + root_qsf->mss = 0x3E00; +} + +unsigned int hifc_root_sq_enqueue(void *v_hba, struct hifc_root_sqe_s *v_sqe) +{ + unsigned char task_type = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_root_sq_info_s *sq_info = NULL; + struct hifc_hba_s *hba = NULL; + struct hifc_root_sqe_s *sqe = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_sqe, + return UNF_RETURN_ERROR); + + /* Root use one sq by default */ + hba = (struct hifc_hba_s *)v_hba; + root_info = &hba->root_info; + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info); + task_type = (unsigned char)v_sqe->task_section.fc_dw0.task_type; + + spin_lock_irqsave(&sq_info->root_sq_spin_lock, flag); + + /* Check flush state */ + if (sq_info->in_flush == UNF_TRUE) { + HIFC_ERR_IO_STAT(hba, task_type); + HIFC_HBA_STAT(hba, HIFC_STAT_ROOT_IO_FLUSHED); + spin_unlock_irqrestore(&sq_info->root_sq_spin_lock, flag); + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, "[err]Root SQ is flushing"); + return UNF_RETURN_ERROR; + } + + /* Check root SQ whether is full */ + if (hifc_root_sq_is_full(sq_info) == UNF_TRUE) { + HIFC_ERR_IO_STAT(hba, task_type); + HIFC_HBA_STAT(hba, HIFC_STAT_ROOT_SQ_FULL); + spin_unlock_irqrestore(&sq_info->root_sq_spin_lock, flag); + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, "[err]Root SQ is full"); + return UNF_RETURN_ERROR; + } + + if (unlikely(!hba->heart_status)) { + spin_unlock_irqrestore(&sq_info->root_sq_spin_lock, flag); + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[err]Heart status is false"); + return UNF_RETURN_ERROR; + } + /* Get available wqe */ + sqe = (struct hifc_root_sqe_s *)hifc_slq_get_addr(sq_info->sq_handle, + sq_info->pi); + if (!sqe) { + HIFC_ERR_IO_STAT(hba, task_type); + spin_unlock_irqrestore(&sq_info->root_sq_spin_lock, flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Get root SQ Sqe failed, PI %u", sq_info->pi); + + return UNF_RETURN_ERROR; + } + + hifc_build_root_wqe_qsf((void *)(&v_sqe->ctrl_section.qsf)); + HIFC_IO_STAT(hba, task_type); + hifc_convert_root_wqe_to_big_endian(v_sqe); + memcpy(sqe, v_sqe, sizeof(struct hifc_root_sqe_s)); + + /* Update PI and Obit */ + hifc_update_producer_info(sq_info->q_depth, &sq_info->pi, + &sq_info->owner); + + /* doorbell */ + hifc_ring_root_sq_db(hba, sq_info); + + spin_unlock_irqrestore(&sq_info->root_sq_spin_lock, flag); + UNF_REFERNCE_VAR(task_type); + + return RETURN_OK; +} + +static int hifc_root_rqe_done( + struct hifc_root_rq_complet_info_s *v_completion_info) +{ + if (v_completion_info->done != 0) + return UNF_TRUE; + + return UNF_FALSE; +} + +static void hifc_clear_root_rqe_done( + struct hifc_root_rq_complet_info_s *v_completion_info) +{ + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_completion_info, return); + + v_completion_info->done = 0; +} + +static int hifc_check_root_rqe_type( + struct hifc_root_rq_complet_info_s *v_completion_info) +{ + if (v_completion_info->fc_pkt != 0) + return UNF_TRUE; + + return UNF_FALSE; +} + +void hifc_update_root_rq_info(struct hifc_root_rq_info_s *v_rq_info, + unsigned short v_rcv_buf_num) +{ + unsigned short loop = 0; + struct hifc_root_rq_complet_info_s completion_info = { 0 }; + struct hifc_root_rq_complet_info_s *complet_info = NULL; + + for (loop = 0; loop < v_rcv_buf_num; loop++) { + /* 
Obtain CompletionInfo */ + complet_info = (struct hifc_root_rq_complet_info_s *) + (v_rq_info->rq_completion_buff) + v_rq_info->ci; + + /* big-little endian convert */ + memcpy(&completion_info, complet_info, sizeof(completion_info)); + hifc_big_to_cpu32(&completion_info, sizeof(completion_info)); + + /* Clear done bit */ + hifc_clear_root_rqe_done(&completion_info); + + /* Write back done bit */ + hifc_cpu_to_big32(&completion_info, sizeof(completion_info)); + memcpy(complet_info, &completion_info, sizeof(completion_info)); + + /* Update Obit and PI in RQE */ + hifc_update_producer_info(v_rq_info->q_depth, &v_rq_info->pi, + &v_rq_info->owner); + + v_rq_info->ci = ((v_rq_info->ci + 1) < v_rq_info->q_depth) ? + (v_rq_info->ci + 1) : 0; + + wmb(); + *v_rq_info->pi_vir_addr = cpu_to_be16(v_rq_info->pi); + } +} + +void hifc_root_rqe_analysis( + struct hifc_hba_s *v_hba, + struct hifc_root_rq_info_s *v_rq_info, + struct hifc_root_rq_complet_info_s *v_completion_info, + unsigned short v_rcv_buf_num) +{ + unsigned int ret = UNF_RETURN_ERROR; + + if (v_completion_info->sts_only) { + /* case1: receive ElsRsp Status */ + if (v_completion_info->status == RETURN_OK) + ret = hifc_rq_rcv_els_rsp_sts(v_hba, v_completion_info); + else + ret = hifc_rq_rcv_srv_err(v_hba, v_completion_info); + } else { + ret = hifc_rcv_service_frame_from_rq(v_hba, v_rq_info, + v_completion_info, + v_rcv_buf_num); + } + + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[warn]Up Layer Process RQE Frame or Status abnormal(0x%x)", + ret); + } +} + +void hifc_process_root_rqe(unsigned long v_rq_info) +{ + int rqe_done = UNF_FALSE; + int rqe_valid = UNF_FALSE; + unsigned short rcv_buf_num = 0; + unsigned int index = 0; + struct nic_rq_wqe *rq_wqe = NULL; + struct hifc_hba_s *hba = NULL; + struct hifc_root_info_s *root_info = NULL; + struct hifc_root_rq_complet_info_s *complet_info = NULL; + struct hifc_root_rq_complet_info_s completion_info = { 0 }; + + struct hifc_root_rq_info_s *rq_info = + (struct hifc_root_rq_info_s *)v_rq_info; + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, rq_info, return); + + root_info = (struct hifc_root_info_s *)(rq_info->root_info); + hba = (struct hifc_hba_s *)(root_info->phba); + + for (index = 0; index < HIFC_RQE_MAX_PROCESS_NUM_PER_INTR; index++) { + /* Obtain RQE */ + rq_wqe = (struct nic_rq_wqe *) + hifc_slq_get_addr(rq_info->rq_handle, rq_info->ci); + if (!rq_wqe) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, "[err]Get Rqe failed"); + break; + } + + /* Check whether to process RQE */ + complet_info = (struct hifc_root_rq_complet_info_s *) + (rq_info->rq_completion_buff) + rq_info->ci; + + memcpy(&completion_info, complet_info, sizeof(completion_info)); + hifc_big_to_cpu32(&completion_info, sizeof(completion_info)); + + rqe_done = hifc_root_rqe_done(&completion_info); + if (rqe_done != UNF_TRUE) { + atomic_set(&rq_info->flush_state, + HIFC_QUEUE_FLUSH_DONE); + break; + } + + rmb(); + + rcv_buf_num = (completion_info.buf_length + + HIFC_ROOT_RQ_RECV_BUFF_SIZE - 1) / + HIFC_ROOT_RQ_RECV_BUFF_SIZE; + if (rcv_buf_num == 0) + rcv_buf_num = 1; + + rqe_valid = hifc_check_root_rqe_type(&completion_info); + if (rqe_valid == UNF_TRUE) { + hifc_root_rqe_analysis(hba, rq_info, &completion_info, + rcv_buf_num); + } else { + /* Receive illegal frames and record */ + HIFC_IO_STAT(hba, HIFCOE_TASK_T_BUTT); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_IO_ATT, + UNF_WARN, + "[warn]Port(0x%x) Receive an unsupported frame, drop it", + 
hba->port_cfg.port_id); + } + + hifc_update_root_rq_info(rq_info, rcv_buf_num); + } + + if (index == HIFC_RQE_MAX_PROCESS_NUM_PER_INTR) + tasklet_schedule(&rq_info->tasklet); +} + +static inline int hifc_is_scq_link_wqe(struct hifc_scq_info_s *v_scq_info) +{ + unsigned short custom_scqe_num = 0; + + custom_scqe_num = v_scq_info->ci + 1; + + if ((custom_scqe_num % v_scq_info->wqe_num_per_buf == 0) || + (v_scq_info->valid_wqe_num == custom_scqe_num)) + return UNF_TRUE; + else + return UNF_FALSE; +} + +static inline struct hifcoe_scqe_type_s *hifc_get_scq_entry( + struct hifc_scq_info_s *v_scq_info) +{ + unsigned int buf_id = 0; + unsigned short buf_offset = 0; + unsigned short ci = 0; + struct cqm_buf_list_s *buf = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_scq_info, return NULL); + + ci = v_scq_info->ci; + buf_id = ci / v_scq_info->wqe_num_per_buf; + buf = &v_scq_info->cqm_scq_info->q_room_buf_1.buf_list[buf_id]; + buf_offset = (unsigned short)(ci % v_scq_info->wqe_num_per_buf); + + return (struct hifcoe_scqe_type_s *)(buf->va) + buf_offset; +} + +static inline int hifc_is_cqe_done(unsigned int *v_done, unsigned int *v_owner, + unsigned short v_driver_owner) +{ + return ((((unsigned short)(!!(*v_done & HIFC_DONE_MASK)) == + v_driver_owner) && ((unsigned short) + (!!(*v_owner & HIFC_OWNER_MASK)) == v_driver_owner)) ? + UNF_TRUE : UNF_FALSE); +} + +unsigned int hifc_process_scq_cqe_entity(unsigned long v_scq_info, + unsigned int proc_cnt) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int index = 0; + struct hifc_wq_header_s *queue_header = NULL; + struct hifcoe_scqe_type_s *scqe = NULL; + struct hifcoe_scqe_type_s tmp_scqe; + + struct hifc_scq_info_s *scq_info = (struct hifc_scq_info_s *)v_scq_info; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, scq_info, return ret); + + queue_header = (struct hifc_wq_header_s *) + (void *)(scq_info->cqm_scq_info->q_header_vaddr); + + for (index = 0; index < proc_cnt;) { + /* If linked wqe, then update CI */ + if (hifc_is_scq_link_wqe(scq_info) == UNF_TRUE) { + hifc_update_consumer_info(scq_info->valid_wqe_num, + &scq_info->ci, + &scq_info->ci_owner); + hifc_update_cq_header(&queue_header->ci_record, + scq_info->ci, + scq_info->ci_owner); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, + UNF_INFO, + "[info]Current wqe is a linked wqe"); + continue; + } + + /* Get SCQE and then check obit & donebit whether been set */ + scqe = hifc_get_scq_entry(scq_info); + if (unlikely(!scqe)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, "[warn]Scqe is NULL"); + break; + } + + if (hifc_is_cqe_done((unsigned int *)(void *)(&scqe->wd0), + (unsigned int *)(void *)(&scqe->ch.wd0), + scq_info->ci_owner) != UNF_TRUE) { + atomic_set(&scq_info->flush_state, + HIFC_QUEUE_FLUSH_DONE); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, + UNF_INFO, + "[info]Now has no valid scqe"); + break; + } + + /* rmb & do memory copy */ + rmb(); + memcpy(&tmp_scqe, scqe, sizeof(struct hifcoe_scqe_type_s)); + + hifc_big_to_cpu32(&tmp_scqe, sizeof(struct hifcoe_scqe_type_s)); + + /* process SCQ entry */ + ret = hifc_rcv_scqe_entry_from_scq(scq_info->phba, + (void *)&tmp_scqe, + scq_info->queue_id); + if (unlikely(ret != RETURN_OK)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_IO_ATT, + UNF_WARN, + "[warn]QueueId(0x%x) scqn(0x%x) scqe process error at CI(0x%x)", + scq_info->queue_id, scq_info->scqn, + scq_info->ci); + } + + /* Update Driver's CI & Obit */ + hifc_update_consumer_info(scq_info->valid_wqe_num, + &scq_info->ci, 
&scq_info->ci_owner); + hifc_update_cq_header(&queue_header->ci_record, scq_info->ci, + scq_info->ci_owner); + index++; + } + /* Re-schedule again if necessary */ + if (proc_cnt == index) + tasklet_schedule(&scq_info->tasklet); + + return index; +} + +void hifc_set_scq_irq_cfg(struct hifc_hba_s *hba, unsigned int mode, + unsigned short msix_index) +{ + unsigned char pending_limt = 0; + unsigned char coalesc_timer_cfg = 0; + + struct nic_interrupt_info info = { 0 }; + + if (mode != HIFC_SCQ_INTR_LOW_LATENCY_MODE) { + pending_limt = 5; + coalesc_timer_cfg = 10; + } + + memset(&info, 0, sizeof(info)); + info.interrupt_coalesc_set = 1; + info.lli_set = 0; + info.pending_limt = pending_limt; + info.coalesc_timer_cfg = coalesc_timer_cfg; + info.resend_timer_cfg = 0; + info.msix_index = msix_index; + hifc_set_interrupt_cfg(hba->hw_dev_handle, info); +} + +void hifc_process_scq_cqe(unsigned long v_scq_info) +{ + struct hifc_scq_info_s *scq_info = (struct hifc_scq_info_s *)v_scq_info; + + HIFC_CHECK(INVALID_VALUE32, scq_info, return); + + hifc_process_scq_cqe_entity(v_scq_info, + HIFC_CQE_MAX_PROCESS_NUM_PER_INTR); +} + +irqreturn_t hifc_scq_irq(int v_irq, void *v_scq_info) +{ + HIFC_CHECK(INVALID_VALUE32, NULL != v_scq_info, return IRQ_NONE); + + tasklet_schedule(&((struct hifc_scq_info_s *)v_scq_info)->tasklet); + + return IRQ_HANDLED; +} + +static unsigned int hifc_alloc_scq_int(struct hifc_scq_info_s *v_scq_info) +{ + int ret = UNF_RETURN_ERROR_S32; + unsigned short act_num = 0; + struct irq_info irq_info; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_scq_info, + return UNF_RETURN_ERROR); + + /* 1. Alloc & check SCQ IRQ */ + hba = (struct hifc_hba_s *)(v_scq_info->phba); + ret = hifc_alloc_irqs(hba->hw_dev_handle, SERVICE_T_FC, + HIFC_INT_NUM_PER_QUEUE, &irq_info, &act_num); + if ((ret != RETURN_OK) || (act_num != HIFC_INT_NUM_PER_QUEUE)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Allocate scq irq failed, return %d", ret); + + return UNF_RETURN_ERROR; + } + + if (irq_info.msix_entry_idx >= HIFC_SCQ_INT_ID_MAX) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]SCQ irq id exceed %d, msix_entry_idx %d", + HIFC_SCQ_INT_ID_MAX, irq_info.msix_entry_idx); + + hifc_free_irq(hba->hw_dev_handle, SERVICE_T_FC, + irq_info.irq_id); + + return UNF_RETURN_ERROR; + } + + v_scq_info->irq_id = (unsigned int)(irq_info.irq_id); + v_scq_info->msix_entry_idx = (unsigned short)(irq_info.msix_entry_idx); + + ret = snprintf(v_scq_info->irq_name, HIFC_IRQ_NAME_MAX - 1, + "fc_scq%u_%x_msix%u", v_scq_info->queue_id, + hba->port_cfg.port_id, v_scq_info->msix_entry_idx); + UNF_FUNCTION_RETURN_CHECK(ret, HIFC_IRQ_NAME_MAX - 1); + /* 2. SCQ IRQ tasklet init */ + tasklet_init(&v_scq_info->tasklet, hifc_process_scq_cqe, + (unsigned long)v_scq_info); + + /* 3. 
Request IRQ for SCQ */ + ret = request_irq(v_scq_info->irq_id, hifc_scq_irq, 0UL, + v_scq_info->irq_name, v_scq_info); + hifc_set_msix_state(hba->hw_dev_handle, v_scq_info->msix_entry_idx, + HIFC_MSIX_ENABLE); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Request SCQ irq failed, SCQ Index = %u, return %d", + v_scq_info->queue_id, ret); + + hifc_free_irq(hba->hw_dev_handle, SERVICE_T_FC, + v_scq_info->irq_id); + memset(v_scq_info->irq_name, 0, HIFC_IRQ_NAME_MAX); + v_scq_info->irq_id = 0; + v_scq_info->msix_entry_idx = 0; + return UNF_RETURN_ERROR; + } + return RETURN_OK; +} + +static void hifc_free_scq_int(struct hifc_scq_info_s *v_scq_info) +{ + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_scq_info, return); + + hba = (struct hifc_hba_s *)(v_scq_info->phba); + hifc_set_msix_state(hba->hw_dev_handle, v_scq_info->msix_entry_idx, + HIFC_MSIX_DISABLE); + free_irq(v_scq_info->irq_id, v_scq_info); + tasklet_kill(&v_scq_info->tasklet); + hifc_free_irq(hba->hw_dev_handle, SERVICE_T_FC, v_scq_info->irq_id); + memset(v_scq_info->irq_name, 0, HIFC_IRQ_NAME_MAX); + v_scq_info->irq_id = 0; + v_scq_info->msix_entry_idx = 0; +} + +static void hifc_init_scq_info(struct hifc_hba_s *v_hba, + struct cqm_queue_s *v_cqm_scq, + unsigned int queue_id, + struct hifc_scq_info_s **v_ppscq_info) +{ + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_cqm_scq, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_ppscq_info, return); + + *v_ppscq_info = &v_hba->scq_info[queue_id]; + (*v_ppscq_info)->queue_id = queue_id; + (*v_ppscq_info)->scqn = v_cqm_scq->index; + (*v_ppscq_info)->phba = (void *)v_hba; + + (*v_ppscq_info)->cqm_scq_info = v_cqm_scq; + (*v_ppscq_info)->wqe_num_per_buf = v_cqm_scq->q_room_buf_1.buf_size / + HIFC_SCQE_SIZE; + (*v_ppscq_info)->wqe_size = HIFC_SCQE_SIZE; + + (*v_ppscq_info)->valid_wqe_num = (HIFC_SCQ_IS_STS(queue_id) ? + HIFC_STS_SCQ_DEPTH : HIFC_CMD_SCQ_DEPTH); + (*v_ppscq_info)->scqc_cq_depth = (HIFC_SCQ_IS_STS(queue_id) ? + HIFC_STS_SCQC_CQ_DEPTH : HIFC_CMD_SCQC_CQ_DEPTH); + (*v_ppscq_info)->scqc_ci_type = (HIFC_SCQ_IS_STS(queue_id) ? 
+ HIFC_STS_SCQ_CI_TYPE : HIFC_CMD_SCQ_CI_TYPE); + + (*v_ppscq_info)->ci = 0; + (*v_ppscq_info)->ci_owner = 1; +} + +static void hifc_init_scq_header(struct hifc_wq_header_s *v_queue_header) +{ + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_queue_header, return); + + memset(v_queue_header, 0, sizeof(struct hifc_wq_header_s)); + + /* Obit default is 1 */ + v_queue_header->db_record.pmsn = 1 << 15; + v_queue_header->db_record.dump_pmsn = + v_queue_header->db_record.pmsn; + v_queue_header->ci_record.cmsn = 1 << 15; + v_queue_header->ci_record.dump_cmsn = + v_queue_header->ci_record.cmsn; + + /* Big endian convert */ + hifc_cpu_to_big64((void *)v_queue_header, + sizeof(struct hifc_wq_header_s)); +} + +static void hifc_cfg_scq_ctx(struct hifc_scq_info_s *v_scq_info, + struct hifcoe_cq_qinfo_s *v_scq_ctx) +{ + struct cqm_queue_s *cqm_scq_info = NULL; + struct hifc_queue_info_bus_s queue_bus; + unsigned long long parity = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_scq_info, return); + + cqm_scq_info = v_scq_info->cqm_scq_info; + + v_scq_ctx->pcie_template_hi = 0; + v_scq_ctx->cur_cqe_gpa = + cqm_scq_info->q_room_buf_1.buf_list->pa >> HIFC_CQE_GPA_SHIFT; + v_scq_ctx->pi = 0; + v_scq_ctx->pi_o = 1; + v_scq_ctx->ci = v_scq_info->ci; + v_scq_ctx->ci_o = v_scq_info->ci_owner; + v_scq_ctx->c_eqn_msi_x = v_scq_info->msix_entry_idx; + v_scq_ctx->ci_type = v_scq_info->scqc_ci_type; + v_scq_ctx->cq_depth = v_scq_info->scqc_cq_depth; + v_scq_ctx->armq = HIFC_ARMQ_IDLE; + v_scq_ctx->cur_cqe_cnt = 0; + v_scq_ctx->cqe_max_cnt = 0; + v_scq_ctx->cqe_dmaattr_idx = 0; + v_scq_ctx->cq_so_ro = 0; + v_scq_ctx->init_mode = HIFC_CQ_INT_MODE; + v_scq_ctx->next_o = 1; + v_scq_ctx->loop_o = 1; + v_scq_ctx->next_cq_wqe_page_gpa = + cqm_scq_info->q_room_buf_1.buf_list[1].pa >> + HIFC_NEXT_CQE_GPA_SHIFT; + v_scq_ctx->pcie_template_lo = 0; + + v_scq_ctx->ci_gpa = (cqm_scq_info->q_header_paddr + + offsetof(struct hifc_wq_header_s, ci_record)) >> + HIFC_CQE_GPA_SHIFT; + + memset(&queue_bus, 0, sizeof(struct hifc_queue_info_bus_s)); + /* bits 20 */ + queue_bus.bus[0] |= + ((unsigned long long)(v_scq_info->scqn & 0xfffff)); + /* bits 3 */ + queue_bus.bus[0] |= + (((unsigned long long)(v_scq_ctx->pcie_template_lo)) << 20); + /* bits 28 */ + queue_bus.bus[0] |= + (((unsigned long long)(v_scq_ctx->ci_gpa & 0xfffffff)) << 23); + /* bits 6 */ + queue_bus.bus[0] |= + (((unsigned long long)(v_scq_ctx->cqe_dmaattr_idx)) << 51); + /* bits 2 */ + queue_bus.bus[0] |= + (((unsigned long long)(v_scq_ctx->cq_so_ro)) << 57); + /* bits 2 */ + queue_bus.bus[0] |= + (((unsigned long long)(v_scq_ctx->init_mode)) << 59); + /* bits 3 */ + queue_bus.bus[0] |= + (((unsigned long long)(v_scq_ctx->c_eqn_msi_x & 0x7)) << 61); + /* bits 7 */ + queue_bus.bus[1] |= + ((unsigned long long)(v_scq_ctx->c_eqn_msi_x >> 3)); + /* bits 1 */ + queue_bus.bus[1] |= + (((unsigned long long)(v_scq_ctx->ci_type)) << 7); + /* bits 3 */ + queue_bus.bus[1] |= + (((unsigned long long)(v_scq_ctx->cq_depth)) << 8); + /* bits 8 */ + queue_bus.bus[1] |= + (((unsigned long long)(v_scq_ctx->cqe_max_cnt)) << 11); + /* bits 3 */ + queue_bus.bus[1] |= + (((unsigned long long)(v_scq_ctx->pcie_template_hi)) << 19); + + parity = hifc_get_parity_value(queue_bus.bus, HIFC_SCQC_BUS_ROW, + HIFC_SCQC_BUS_COL); + v_scq_ctx->parity_0 = parity & 0x1; + v_scq_ctx->parity_1 = (parity >> 0x1) & 0x1; + v_scq_ctx->parity_2 = (parity >> 0x2) & 0x1; + + hifc_cpu_to_big64((void *)v_scq_ctx, sizeof(struct hifcoe_cq_qinfo_s)); +} + +static unsigned int hifc_create_scqc_via_cmdq_sync( + struct 
hifc_hba_s *v_hba, + struct hifcoe_cq_qinfo_s *v_scqc, + unsigned int scqn) +{ +#define HIFC_INIT_SCQC_TIMEOUT 3000 + + int ret; + unsigned int cvt_size; + struct hifcoe_cmdqe_creat_scqc_s init_scqc_cmd; + struct hifc_cmd_buf *cmdq_in_buf; + + cmdq_in_buf = hifc_alloc_cmd_buf(v_hba->hw_dev_handle); + if (!cmdq_in_buf) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]cmdq in_cmd_buf alloc failed"); + + HIFC_ERR_IO_STAT(v_hba, HIFCOE_TASK_T_INIT_SCQC); + return UNF_RETURN_ERROR; + } + + memset(&init_scqc_cmd, 0, sizeof(init_scqc_cmd)); + init_scqc_cmd.wd0.task_type = HIFCOE_TASK_T_INIT_SCQC; + init_scqc_cmd.wd1.scqn = HIFC_LSW(scqn); + cvt_size = sizeof(init_scqc_cmd) - sizeof(init_scqc_cmd.scqc); + hifc_cpu_to_big32(&init_scqc_cmd, cvt_size); + + /* v_scqc is already big endian */ + memcpy(init_scqc_cmd.scqc, v_scqc, sizeof(*v_scqc)); + memcpy(cmdq_in_buf->buf, &init_scqc_cmd, sizeof(init_scqc_cmd)); + cmdq_in_buf->size = sizeof(init_scqc_cmd); + + ret = hifc_cmdq_detail_resp(v_hba->hw_dev_handle, HIFC_ACK_TYPE_CMDQ, + HIFC_MOD_FCOE, 0, + cmdq_in_buf, NULL, HIFC_INIT_SCQC_TIMEOUT); + hifc_free_cmd_buf(v_hba->hw_dev_handle, cmdq_in_buf); + if (ret) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Send creat scqc via cmdq failed, ret=%d", ret); + + HIFC_ERR_IO_STAT(v_hba, HIFCOE_TASK_T_INIT_SCQC); + return UNF_RETURN_ERROR; + } + + HIFC_IO_STAT(v_hba, HIFCOE_TASK_T_INIT_SCQC); + + return RETURN_OK; +} + +static unsigned int hifc_create_scq(struct hifc_hba_s *v_hba) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int scq_index = 0; + unsigned int scq_cfg_num = 0; + struct cqm_queue_s *cqm_scq = NULL; + void *handle = NULL; + struct hifc_scq_info_s *scq_info = NULL; + struct hifcoe_cq_qinfo_s scq_ctx_info; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, + return UNF_RETURN_ERROR); + + handle = v_hba->hw_dev_handle; + + /* Create SCQ by CQM interface */ + for (scq_index = 0; scq_index < HIFC_TOTAL_SCQ_NUM; scq_index++) { + /* + * 1. Create/Allocate SCQ + * + * Notice: SCQ[0, 2, 4 ...]--->CMD SCQ, + * SCQ[1, 3, 5 ...]--->STS SCQ, SCQ[HIFC_TOTAL_SCQ_NUM-1] + * --->Defaul SCQ + */ + cqm_scq = cqm_object_nonrdma_queue_create( + handle, + CQM_OBJECT_NONRDMA_SCQ, + HIFC_SCQ_IS_STS(scq_index) ? + HIFC_STS_SCQ_DEPTH : + HIFC_CMD_SCQ_DEPTH, + HIFC_SCQE_SIZE, + v_hba); + if (!cqm_scq) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, "[err]Create scq failed"); + + goto free_scq; + } + + /* 2. Initialize SCQ (info) */ + hifc_init_scq_info(v_hba, cqm_scq, scq_index, &scq_info); + + /* 3. Allocate & Initialize SCQ interrupt */ + ret = hifc_alloc_scq_int(scq_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, "[err]Allocate scq interrupt failed"); + + cqm_object_delete(&cqm_scq->object); + memset(scq_info, 0, sizeof(struct hifc_scq_info_s)); + goto free_scq; + } + + /* 4. Initialize SCQ queue header */ + hifc_init_scq_header( + (struct hifc_wq_header_s *) + (void *)cqm_scq->q_header_vaddr); + + /* 5. 
Initialize & Create SCQ CTX */ + memset(&scq_ctx_info, 0, sizeof(scq_ctx_info)); + hifc_cfg_scq_ctx(scq_info, &scq_ctx_info); + ret = hifc_create_scqc_via_cmdq_sync(v_hba, + &scq_ctx_info, + scq_info->scqn); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, "[err]Create scq context failed"); + + cqm_object_delete(&cqm_scq->object); + memset(scq_info, 0, sizeof(struct hifc_scq_info_s)); + goto free_scq; + } + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Create SCQ[%u] Scqn=%u WqeNum=%u WqeSize=%u WqePerBuf=%u CqDepth=%u CiType=%u irq=%u msix=%u", + scq_info->queue_id, scq_info->scqn, + scq_info->valid_wqe_num, scq_info->wqe_size, + scq_info->wqe_num_per_buf, scq_info->scqc_cq_depth, + scq_info->scqc_ci_type, scq_info->irq_id, + scq_info->msix_entry_idx); + } + + /* + * Last SCQ is used to handle SCQE delivery access when clearing buffer + */ + v_hba->default_scqn = scq_info->scqn; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Default Scqn=%d CqmScqIndex=%u", + v_hba->default_scqn, cqm_scq->index); + + return RETURN_OK; + +free_scq: + hifc_flush_scq_ctx(v_hba); + + scq_cfg_num = scq_index; + for (scq_index = 0; scq_index < scq_cfg_num; scq_index++) { + scq_info = &v_hba->scq_info[scq_index]; + hifc_free_scq_int(scq_info); + cqm_scq = scq_info->cqm_scq_info; + cqm_object_delete(&cqm_scq->object); + memset(scq_info, 0, sizeof(struct hifc_scq_info_s)); + } + + return UNF_RETURN_ERROR; +} + +static void hifc_destroy_scq(struct hifc_hba_s *v_hba) +{ + unsigned int scq_index = 0; + struct cqm_queue_s *cqm_scq = NULL; + struct hifc_scq_info_s *scq_info = NULL; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Start destroy total %d SCQ", HIFC_TOTAL_SCQ_NUM); + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return); + + /* Use CQM to delete SCQ */ + for (scq_index = 0; scq_index < HIFC_TOTAL_SCQ_NUM; scq_index++) { + scq_info = &v_hba->scq_info[scq_index]; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_ALL, + "[info]Destroy SCQ%u, Scqn=%u, Irq=%u, msix=%u, name=%s", + scq_index, scq_info->scqn, scq_info->irq_id, + scq_info->msix_entry_idx, scq_info->irq_name); + + hifc_free_scq_int(scq_info); + cqm_scq = scq_info->cqm_scq_info; + cqm_object_delete(&cqm_scq->object); + memset(scq_info, 0, sizeof(struct hifc_scq_info_s)); + } +} + +static void hifc_init_srq_info(struct hifc_hba_s *v_hba, + struct cqm_queue_s *v_cqm_srq, + struct hifc_srq_info_s *v_srq_info) +{ + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_cqm_srq, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_srq_info, return); + + v_srq_info->phba = (void *)v_hba; + + v_srq_info->cqm_srq_info = v_cqm_srq; + v_srq_info->wqe_num_per_buf = v_cqm_srq->q_room_buf_1.buf_size / + HIFC_SRQE_SIZE - 1; + v_srq_info->wqe_size = HIFC_SRQE_SIZE; + v_srq_info->valid_wqe_num = v_cqm_srq->valid_wqe_num; + v_srq_info->pi = 0; + v_srq_info->pi_owner = HIFC_SRQ_INIT_LOOP_O; + v_srq_info->pmsn = 0; + v_srq_info->srqn = v_cqm_srq->index; + v_srq_info->first_rqe_rcv_dma = 0; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Init srq info(srq index 0x%x) valid wqe num 0x%x, buffer size 0x%x, wqe num per buf 0x%x", + v_cqm_srq->index, v_srq_info->valid_wqe_num, + v_cqm_srq->q_room_buf_1.buf_size, + v_srq_info->wqe_num_per_buf); +} + +static void hifc_init_srq_header(struct hifc_wq_header_s *v_queue_header) +{ + 
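+	/* Unlike the SCQ header, the SRQ queue header starts zeroed; its
+	 * pmsn is advanced later by hifc_post_els_srq_wqe().
+	 */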
UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_queue_header, return); + + memset(v_queue_header, 0, sizeof(struct hifc_wq_header_s)); +} + +static struct hifcoe_rqe_s *hifc_get_srq_entry( + struct hifc_srq_info_s *v_srq_info, + struct hifcoe_rqe_s **v_linked_rqe, + unsigned short position) +{ + unsigned int buf_id = 0; + unsigned int wqe_num_per_buf = 0; + unsigned short buf_offset = 0; + struct cqm_buf_list_s *buf = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_srq_info, return NULL); + + wqe_num_per_buf = v_srq_info->wqe_num_per_buf; + + buf_id = position / wqe_num_per_buf; + buf = &v_srq_info->cqm_srq_info->q_room_buf_1.buf_list[buf_id]; + buf_offset = position % ((unsigned short)wqe_num_per_buf); + + if (buf_offset + 1 == wqe_num_per_buf) + *v_linked_rqe = (struct hifcoe_rqe_s *)(buf->va) + + wqe_num_per_buf; + else + *v_linked_rqe = NULL; + + return (struct hifcoe_rqe_s *)(buf->va) + buf_offset; +} + +/** + * hifc_set_srq_wqe_owner_be - Assign a value to Owner Bit of WQE in the + * big-endian format of Wqe Page. + * @v_sqe_ctrl_in_wp: sqe ctrl wqe struct info for communicate with uncode + * @owner: owner value which need to set + */ +static void hifc_set_srq_wqe_owner_be( + struct hifcoe_wqe_ctrl_s *v_sqe_ctrl_in_wp, + unsigned int owner) +{ + struct hifcoe_wqe_ctrl_ch_s wqe_ctrl_ch; + + mb(); + + wqe_ctrl_ch.ctrl_ch_val = be32_to_cpu(v_sqe_ctrl_in_wp->ch.ctrl_ch_val); + wqe_ctrl_ch.wd0.owner = owner; + v_sqe_ctrl_in_wp->ch.ctrl_ch_val = cpu_to_be32(wqe_ctrl_ch.ctrl_ch_val); + + mb(); +} + +static void hifc_set_srq_link_wqe_owner_be(struct hifc_link_wqe_s *v_link_wqe, + unsigned int owner, + unsigned short pmsn) +{ + struct hifc_link_wqe_s local_lw; + + mb(); + local_lw.val_wd1 = be32_to_cpu(v_link_wqe->val_wd1); + local_lw.wd1.msn = pmsn; + local_lw.wd1.dump_msn = (local_lw.wd1.msn & 0x7fff); + v_link_wqe->val_wd1 = cpu_to_be32(local_lw.val_wd1); + + local_lw.val_wd0 = be32_to_cpu(v_link_wqe->val_wd0); + local_lw.wd0.o = owner; + v_link_wqe->val_wd0 = cpu_to_be32(local_lw.val_wd0); + mb(); +} + +void hifc_post_els_srq_wqe(struct hifc_srq_info_s *v_srq_info, + unsigned short buff_id) +{ + struct hifcoe_rqe_s *rqe = NULL; + struct hifcoe_rqe_s tmp_rqe; + struct hifcoe_rqe_s *linked_rqe = NULL; + struct hifc_wq_header_s *wq_header = NULL; + struct hifc_srq_buff_entry_s *buff_entry = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_srq_info, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, + buff_id < v_srq_info->valid_wqe_num, return); + + buff_entry = v_srq_info->els_buff_entry_head + buff_id; + + spin_lock(&v_srq_info->srq_spin_lock); + + /* Obtain RQE, not include link wqe */ + rqe = hifc_get_srq_entry(v_srq_info, &linked_rqe, v_srq_info->pi); + if (!rqe) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]post els srq,get srqe failed, valid wqe num 0x%x, pi 0x%x, pmsn 0x%x", + v_srq_info->valid_wqe_num, v_srq_info->pi, + v_srq_info->pmsn); + + spin_unlock(&v_srq_info->srq_spin_lock); + + return; + } + + /* Initialize RQE */ + /* cs section is not used */ + memset(&tmp_rqe, 0, sizeof(struct hifcoe_rqe_s)); + + /* default Obit is invalid, and set valid finally */ + hifc_build_srq_wqe_ctrls(&tmp_rqe, !v_srq_info->pi_owner, + v_srq_info->pmsn + 1); + + tmp_rqe.bds_sl.buf_addr_hi = HIFC_HIGH_32_BITS(buff_entry->buff_dma); + tmp_rqe.bds_sl.buf_addr_lo = HIFC_LOW_32_BITS(buff_entry->buff_dma); + tmp_rqe.drv_sl.wd0.user_id = buff_id; + + /* convert to big endian */ + hifc_cpu_to_big32(&tmp_rqe, sizeof(struct hifcoe_rqe_s)); + + memcpy(rqe, &tmp_rqe, 
sizeof(struct hifcoe_rqe_s)); + + /* reset Obit */ + hifc_set_srq_wqe_owner_be( + (struct hifcoe_wqe_ctrl_s *)(void *)&rqe->ctrl_sl, + v_srq_info->pi_owner); + + if (linked_rqe) { + /* Update Obit in linked WQE */ + hifc_set_srq_link_wqe_owner_be( + (struct hifc_link_wqe_s *)(void *)linked_rqe, + v_srq_info->pi_owner, + v_srq_info->pmsn + 1); + } + + /* Update PI and PMSN */ + hifc_update_producer_info((unsigned short)(v_srq_info->valid_wqe_num), + &v_srq_info->pi, + &v_srq_info->pi_owner); + + /* + * pmsn is 16bit. The value is added to the maximum value and is + * automatically reversed + */ + v_srq_info->pmsn++; + + /* Update pmsn in queue header */ + wq_header = (struct hifc_wq_header_s *) + (void *)v_srq_info->cqm_srq_info->q_header_vaddr; + hifc_update_srq_header(&wq_header->db_record, v_srq_info->pmsn); + + spin_unlock(&v_srq_info->srq_spin_lock); +} + +static void hifc_cfg_srq_ctx(struct hifc_srq_info_s *v_srq_info, + struct hifc_srq_ctx_s *v_srq_ctx, + unsigned int v_sge_size, + unsigned long long v_rqe_gpa) +{ + struct hifc_srq_ctx_s *srq_ctx = NULL; + struct cqm_queue_s *cqm_srq_info = NULL; + struct hifc_queue_info_bus_s queue_bus; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_srq_info, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_srq_ctx, return); + + cqm_srq_info = v_srq_info->cqm_srq_info; + srq_ctx = v_srq_ctx; + + srq_ctx->last_rq_pmsn = 0; + srq_ctx->cur_rqe_msn = 0; + srq_ctx->pcie_template = 0; + /* The value of CTX needs to be updated when RQE is configured */ + srq_ctx->cur_rqe_gpa = v_rqe_gpa; + srq_ctx->cur_sge_v = 0; + srq_ctx->cur_sge_l = 0; + /* The information received by the SRQ is reported through the SCQ. + * The interrupt and ArmCQ are disabled. + */ + srq_ctx->ceqn_msix = 0; + srq_ctx->int_mode = 0; + srq_ctx->cur_sge_remain_len = 0; + srq_ctx->cur_sge_id = 0; + srq_ctx->consant_sge_len = v_sge_size; + srq_ctx->cur_wqe = 0; + srq_ctx->pmsn_type = HIFC_PMSN_CI_TYPE_FROM_HOST; + srq_ctx->bdsl = 0; + srq_ctx->cr = 0; + srq_ctx->csl = 0; + srq_ctx->cf = 0; + srq_ctx->ctrl_sl = 0; + srq_ctx->cur_sge_gpa = 0; + srq_ctx->cur_pmsn_gpa = cqm_srq_info->q_header_paddr; + srq_ctx->pre_fetch_max_msn = 0; + srq_ctx->cqe_max_cnt = 0; + srq_ctx->cur_cqe_cnt = 0; + srq_ctx->arm_q = 0; + srq_ctx->cq_so_ro = 0; + srq_ctx->cqe_dma_attr_idx = 0; + srq_ctx->rq_so_ro = 0; + srq_ctx->rqe_dma_attr_idx = 0; + srq_ctx->loop_o = HIFC_SRQ_INIT_LOOP_O; + srq_ctx->ring = HIFC_QUEUE_RING; + + memset(&queue_bus, 0, sizeof(struct hifc_queue_info_bus_s)); + /* bits 60 */ + queue_bus.bus[0] |= + ((unsigned long long)(cqm_srq_info->q_ctx_paddr >> 4)); + /* bits 4 */ + queue_bus.bus[0] |= + (((unsigned long long)(srq_ctx->rqe_dma_attr_idx & 0xf)) << 60); + /* bits 2 */ + queue_bus.bus[1] |= + ((unsigned long long)(srq_ctx->rqe_dma_attr_idx >> 4)); + /* bits 2 */ + queue_bus.bus[1] |= (((unsigned long long)(srq_ctx->rq_so_ro)) << 2); + /* bits 60 */ + queue_bus.bus[1] |= + (((unsigned long long)(srq_ctx->cur_pmsn_gpa >> 4)) << 4); + /* bits 17 */ + queue_bus.bus[2] |= ((unsigned long long)(srq_ctx->consant_sge_len)); + /* bits 6 */ + queue_bus.bus[2] |= + (((unsigned long long)(srq_ctx->pcie_template)) << 17); + + srq_ctx->parity = hifc_get_parity_value((void *)queue_bus.bus, + HIFC_SRQC_BUS_ROW, + HIFC_SRQC_BUS_COL); + + hifc_cpu_to_big64((void *)srq_ctx, sizeof(struct hifc_srq_ctx_s)); +} + +static unsigned int hifc_create_srqc_via_cmdq_sync( + struct hifc_hba_s *v_hba, + struct hifc_srq_ctx_s *v_srqc, + unsigned long long v_ctx_gpa) +{ +#define HIFC_INIT_SRQC_TIMEOUT 3000 + + 
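+	/* The SRQ context is addressed by its GPA (v_ctx_gpa) in the cmdqe,
+	 * unlike the SCQ variant above which passes the scqn; the context
+	 * image in v_srqc is copied in already big endian.
+	 */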
int ret; + unsigned int cvt_size; + struct hifcoe_cmdqe_creat_srqc_s init_srqc_cmd; + struct hifc_cmd_buf *cmdq_in_buf; + + cmdq_in_buf = hifc_alloc_cmd_buf(v_hba->hw_dev_handle); + if (!cmdq_in_buf) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]cmdq in_cmd_buf alloc failed"); + + HIFC_ERR_IO_STAT(v_hba, HIFCOE_TASK_T_INIT_SRQC); + return UNF_RETURN_ERROR; + } + + memset(&init_srqc_cmd, 0, sizeof(init_srqc_cmd)); + init_srqc_cmd.wd0.task_type = HIFCOE_TASK_T_INIT_SRQC; + init_srqc_cmd.srqc_gpa_h = HIFC_HIGH_32_BITS(v_ctx_gpa); + init_srqc_cmd.srqc_gpa_l = HIFC_LOW_32_BITS(v_ctx_gpa); + cvt_size = sizeof(init_srqc_cmd) - sizeof(init_srqc_cmd.srqc); + hifc_cpu_to_big32(&init_srqc_cmd, cvt_size); + + /* v_srqc is already big-endian */ + memcpy(init_srqc_cmd.srqc, v_srqc, sizeof(*v_srqc)); + memcpy(cmdq_in_buf->buf, &init_srqc_cmd, sizeof(init_srqc_cmd)); + cmdq_in_buf->size = sizeof(init_srqc_cmd); + + ret = hifc_cmdq_detail_resp(v_hba->hw_dev_handle, HIFC_ACK_TYPE_CMDQ, + HIFC_MOD_FCOE, 0, cmdq_in_buf, + NULL, HIFC_INIT_SRQC_TIMEOUT); + + hifc_free_cmd_buf(v_hba->hw_dev_handle, cmdq_in_buf); + + if (ret) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Send creat srqc via cmdq failed, ret=%d", ret); + + HIFC_ERR_IO_STAT(v_hba, HIFCOE_TASK_T_INIT_SRQC); + return UNF_RETURN_ERROR; + } + + HIFC_IO_STAT(v_hba, HIFCOE_TASK_T_INIT_SRQC); + + return RETURN_OK; +} + +static void hifc_init_els_srq_wqe(struct hifc_srq_info_s *v_srq_info) +{ + unsigned int rqe_index = 0; + struct hifc_srq_buff_entry_s *buff_entry = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_srq_info, return); + + for (rqe_index = 0; rqe_index < v_srq_info->valid_wqe_num - 1; + rqe_index++) { + buff_entry = v_srq_info->els_buff_entry_head + rqe_index; + + hifc_post_els_srq_wqe(v_srq_info, buff_entry->buff_id); + } +} + +static void hifc_free_els_srq_buff(struct hifc_hba_s *v_hba, + unsigned int srq_valid_wqe) +{ + unsigned int buff_index = 0; + struct hifc_srq_info_s *srq_info = NULL; + struct hifc_srq_buff_entry_s *buff_entry = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return); + + srq_info = &v_hba->els_srq_info; + + if (!srq_info->els_buff_entry_head) + return; + + for (buff_index = 0; buff_index < srq_valid_wqe; buff_index++) { + buff_entry = &srq_info->els_buff_entry_head[buff_index]; + buff_entry->buff_addr = NULL; + } + + if (srq_info->buff_list.buflist) { + for (buff_index = 0; buff_index < srq_info->buff_list.buf_num; + buff_index++) { + if (srq_info->buff_list.buflist[buff_index].paddr) { + pci_unmap_single( + v_hba->pci_dev, + srq_info->buff_list.buflist[buff_index].paddr, + srq_info->buff_list.buf_size, + DMA_FROM_DEVICE); + srq_info->buff_list.buflist[buff_index].paddr = 0; + } + if (srq_info->buff_list.buflist[buff_index].vaddr) { + kfree(srq_info->buff_list.buflist[buff_index].vaddr); + srq_info->buff_list.buflist[buff_index].vaddr = NULL; + } + } + + kfree(srq_info->buff_list.buflist); + srq_info->buff_list.buflist = NULL; + } + + if (srq_info->els_buff_entry_head) { + kfree(srq_info->els_buff_entry_head); + srq_info->els_buff_entry_head = NULL; + } +} + +static unsigned int hifc_alloc_els_srq_buff(struct hifc_hba_s *v_hba, + unsigned int srq_valid_wqe) +{ + unsigned int req_buff_size = 0; + unsigned int buff_index = 0; + struct hifc_srq_info_s *srq_info = NULL; + struct hifc_srq_buff_entry_s *buff_entry = NULL; + unsigned int buf_total_size; + unsigned int buf_num; + unsigned int alloc_idx; + unsigned int cur_buf_idx = 0; + unsigned int cur_buf_offset 
= 0; + unsigned int buf_cnt_perhugebuf; + + srq_info = &v_hba->els_srq_info; + + /* Apply for entry buffer */ + req_buff_size = (unsigned int)(srq_valid_wqe * + sizeof(struct hifc_srq_buff_entry_s)); + srq_info->els_buff_entry_head = + (struct hifc_srq_buff_entry_s *)kmalloc(req_buff_size, + GFP_KERNEL); + if (!srq_info->els_buff_entry_head) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Allocate ELS Srq receive buffer entrys failed"); + + return UNF_RETURN_ERROR; + } + memset(srq_info->els_buff_entry_head, 0, req_buff_size); + + buf_total_size = HIFC_SRQ_ELS_SGE_LEN * srq_valid_wqe; + + srq_info->buff_list.buf_size = + buf_total_size > BUF_LIST_PAGE_SIZE ? + BUF_LIST_PAGE_SIZE : buf_total_size; + buf_cnt_perhugebuf = + srq_info->buff_list.buf_size / HIFC_SRQ_ELS_SGE_LEN; + buf_num = srq_valid_wqe % buf_cnt_perhugebuf ? srq_valid_wqe / + buf_cnt_perhugebuf + 1 : srq_valid_wqe / + buf_cnt_perhugebuf; + srq_info->buff_list.buflist = (struct buff_list_s *) + kmalloc(buf_num * sizeof(struct buff_list_s), + GFP_KERNEL); + srq_info->buff_list.buf_num = buf_num; + + if (!srq_info->buff_list.buflist) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Allocate ELS buf list failed out of memory"); + goto free_buff; + } + memset(srq_info->buff_list.buflist, 0, + buf_num * sizeof(struct buff_list_s)); + + for (alloc_idx = 0; alloc_idx < buf_num; alloc_idx++) { + srq_info->buff_list.buflist[alloc_idx].vaddr = + kmalloc(srq_info->buff_list.buf_size, GFP_KERNEL); + if (!srq_info->buff_list.buflist[alloc_idx].vaddr) + goto free_buff; + memset(srq_info->buff_list.buflist[alloc_idx].vaddr, 0, + srq_info->buff_list.buf_size); + + srq_info->buff_list.buflist[alloc_idx].paddr = + pci_map_single( + v_hba->pci_dev, + srq_info->buff_list.buflist[alloc_idx].vaddr, + srq_info->buff_list.buf_size, DMA_FROM_DEVICE); + if (pci_dma_mapping_error( + v_hba->pci_dev, + srq_info->buff_list.buflist[alloc_idx].paddr)) { + srq_info->buff_list.buflist[alloc_idx].paddr = 0; + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, "[err]Map els srq buffer failed"); + + goto free_buff; + } + } + + /* Apply for receiving buffer and attach it to the free linked list */ + for (buff_index = 0; buff_index < srq_valid_wqe; buff_index++) { + buff_entry = &srq_info->els_buff_entry_head[buff_index]; + + cur_buf_idx = buff_index / buf_cnt_perhugebuf; + + cur_buf_offset = HIFC_SRQ_ELS_SGE_LEN * + (buff_index % buf_cnt_perhugebuf); + buff_entry->buff_addr = + srq_info->buff_list.buflist[cur_buf_idx].vaddr + + cur_buf_offset; + + buff_entry->buff_dma = + srq_info->buff_list.buflist[cur_buf_idx].paddr + + cur_buf_offset; + + buff_entry->buff_id = (unsigned short)buff_index; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[EVENT]Allocate bufnum:%u,buf_total_size:%u", + buf_num, buf_total_size); + + return RETURN_OK; + +free_buff: + hifc_free_els_srq_buff(v_hba, srq_valid_wqe); + return UNF_RETURN_ERROR; +} + +/** + * hifc_root_cmdq_enqueue - Send commands to the chip via ROOT CMDQ. 
+ * @v_hba: hba handler to send cmd + * @v_cmdqe: cmdqe buff + * @cmd_len: cmdqe buff len + * @Return: 0 - success, negative - failure + */ +unsigned int hifc_root_cmdq_enqueue(void *v_hba, union hifc_cmdqe_u *v_cmdqe, + unsigned short cmd_len) +{ + unsigned char wqe_type = 0; + int cmdq_ret = 0; + struct hifc_cmd_buf *cmdq_buf = NULL; + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_hba; + wqe_type = (unsigned char)v_cmdqe->common.wd0.task_type; + HIFC_IO_STAT(hba, wqe_type); + + cmdq_buf = hifc_alloc_cmd_buf(hba->hw_dev_handle); + if (!cmdq_buf) { + HIFC_ERR_IO_STAT(hba, wqe_type); + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) CqmHandle(0x%p) allocate cmdq buffer failed", + hba->port_cfg.port_id, hba->hw_dev_handle); + + return UNF_RETURN_ERROR; + } + + memcpy(cmdq_buf->buf, v_cmdqe, cmd_len); + hifc_cpu_to_big32(cmdq_buf->buf, cmd_len); + cmdq_buf->size = cmd_len; + + cmdq_ret = hifc_cmdq_async(hba->hw_dev_handle, HIFC_ACK_TYPE_CMDQ, + HIFC_MOD_FCOE, 0, cmdq_buf); + + if (cmdq_ret != RETURN_OK) { + hifc_free_cmd_buf(hba->hw_dev_handle, cmdq_buf); + HIFC_ERR_IO_STAT(hba, wqe_type); + + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) CqmHandle(0x%p) send buff clear cmnd failed(0x%x)", + hba->port_cfg.port_id, hba->hw_dev_handle, cmdq_ret); + + return UNF_RETURN_ERROR; + } + UNF_REFERNCE_VAR(wqe_type); + return RETURN_OK; +} + +static void hifc_send_clear_srq_cmd(struct hifc_hba_s *v_hba, + struct hifc_srq_info_s *v_srq_info) +{ + union hifc_cmdqe_u cmdqe; + struct cqm_queue_s *cqm_fcp_srq = NULL; + unsigned long flag = 0; + + memset(&cmdqe, 0, sizeof(union hifc_cmdqe_u)); + + spin_lock_irqsave(&v_srq_info->srq_spin_lock, flag); + + cqm_fcp_srq = v_srq_info->cqm_srq_info; + if (!cqm_fcp_srq) { + v_srq_info->state = HIFC_CLEAN_DONE; + spin_unlock_irqrestore(&v_srq_info->srq_spin_lock, flag); + return; + } + + cmdqe.clear_srq.wd0.task_type = HIFCOE_TASK_T_CLEAR_SRQ; + cmdqe.clear_srq.wd1.scqn = HIFC_LSW(v_hba->default_scqn); + cmdqe.clear_srq.wd1.srq_type = v_srq_info->srq_type; + cmdqe.clear_srq.srqc_gpa_h = HIFC_HIGH_32_BITS( + cqm_fcp_srq->q_ctx_paddr); + cmdqe.clear_srq.srqc_gpa_l = HIFC_LOW_32_BITS(cqm_fcp_srq->q_ctx_paddr); + + (void)queue_delayed_work(v_hba->work_queue, + &v_srq_info->del_work, + (unsigned long)msecs_to_jiffies(( + unsigned int)HIFC_SRQ_DEL_STAGE_TIMEOUT_MS)); + + spin_unlock_irqrestore(&v_srq_info->srq_spin_lock, flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port 0x%x begin to clear srq 0x%x(0x%x,0x%llx)", + v_hba->port_cfg.port_id, v_srq_info->srq_type, + HIFC_LSW(v_hba->default_scqn), + (unsigned long long)cqm_fcp_srq->q_ctx_paddr); + + /* Run the ROOT CMDQ command to issue the clear srq command. + * If the command fails to be delivered, retry upon timeout. 
+ */ + (void)hifc_root_cmdq_enqueue(v_hba, &cmdqe, sizeof(cmdqe.clear_srq)); +} + +static void hifc_srq_clr_time_out(struct work_struct *work) +{ + struct hifc_srq_info_s *srq = NULL; + struct hifc_hba_s *hba = NULL; + struct cqm_queue_s *cqm_fcp_imm_srq = NULL; + unsigned long flag = 0; + + srq = container_of(work, struct hifc_srq_info_s, del_work.work); + + spin_lock_irqsave(&srq->srq_spin_lock, flag); + hba = srq->phba; + cqm_fcp_imm_srq = srq->cqm_srq_info; + spin_unlock_irqrestore(&srq->srq_spin_lock, flag); + + if (hba && cqm_fcp_imm_srq) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Port 0x%x clear srq 0x%x stat 0x%x timeout", + hba->port_cfg.port_id, srq->srq_type, srq->state); + + /* + * If the delivery fails or the execution times out after the + * delivery, try again once + */ + srq->del_retry_time++; + + if (srq->del_retry_time < 2) + hifc_send_clear_srq_cmd(hba, srq); + else + srq->del_retry_time = 0; + } +} + +static unsigned int hifc_create_els_srq(struct hifc_hba_s *v_hba) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct cqm_queue_s *cqm_srq = NULL; + struct hifc_wq_header_s *wq_header = NULL; + struct hifc_srq_info_s *srq_info = NULL; + struct hifc_srq_ctx_s srq_ctx = { 0 }; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, + return UNF_RETURN_ERROR); + + cqm_srq = cqm_object_fc_srq_create(v_hba->hw_dev_handle, + CQM_OBJECT_NONRDMA_SRQ, + HIFC_SRQ_ELS_DATA_DEPTH, + HIFC_SRQE_SIZE, + v_hba); + if (!cqm_srq) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Create Els Srq failed"); + + return UNF_RETURN_ERROR; + } + + /* Initialize SRQ */ + srq_info = &v_hba->els_srq_info; + hifc_init_srq_info(v_hba, cqm_srq, srq_info); + srq_info->srq_type = HIFC_SRQ_ELS; + srq_info->enable = UNF_TRUE; + srq_info->state = HIFC_CLEAN_DONE; + srq_info->del_retry_time = 0; + + /* The srq lock is initialized and can be created repeatedly */ + spin_lock_init(&srq_info->srq_spin_lock); + srq_info->spin_lock_init = UNF_TRUE; + + /* Initialize queue header */ + wq_header = (struct hifc_wq_header_s *)(void *)cqm_srq->q_header_vaddr; + hifc_init_srq_header(wq_header); + + INIT_DELAYED_WORK(&srq_info->del_work, hifc_srq_clr_time_out); + + /* Apply for RQ buffer */ + ret = hifc_alloc_els_srq_buff(v_hba, srq_info->valid_wqe_num); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Allocate Els Srq buffer failed"); + + cqm_object_delete(&cqm_srq->object); + memset(srq_info, 0, sizeof(struct hifc_srq_info_s)); + return UNF_RETURN_ERROR; + } + + /* Fill RQE, update queue header */ + hifc_init_els_srq_wqe(srq_info); + + /* Fill SRQ CTX */ + memset(&srq_ctx, 0, sizeof(srq_ctx)); + hifc_cfg_srq_ctx(srq_info, &srq_ctx, HIFC_SRQ_ELS_SGE_LEN, + srq_info->cqm_srq_info->q_room_buf_1.buf_list->pa); + + ret = hifc_create_srqc_via_cmdq_sync( + v_hba, &srq_ctx, + srq_info->cqm_srq_info->q_ctx_paddr); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Creat Els Srqc failed"); + + hifc_free_els_srq_buff(v_hba, srq_info->valid_wqe_num); + cqm_object_delete(&cqm_srq->object); + memset(srq_info, 0, sizeof(struct hifc_srq_info_s)); + + return UNF_RETURN_ERROR; + } + + return RETURN_OK; +} + +void hifc_destroy_srq(void *v_hba) +{ + /* + * Receive clear els srq sts + * ---then--->>> destroy els srq + */ + struct hifc_hba_s *hba = NULL; + struct hifc_srq_info_s *srq_info = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_hba, return); + + hba = (struct hifc_hba_s 
*)v_hba; + srq_info = &hba->els_srq_info; + + /* release receive buffer */ + hifc_free_els_srq_buff(hba, srq_info->valid_wqe_num); + + /* release srq info */ + if (srq_info->cqm_srq_info) { + cqm_object_delete(&srq_info->cqm_srq_info->object); + srq_info->cqm_srq_info = NULL; + } + if (srq_info->spin_lock_init) + srq_info->spin_lock_init = UNF_FALSE; + srq_info->phba = NULL; + srq_info->enable = UNF_FALSE; + srq_info->state = HIFC_CLEAN_DONE; +} + +/** + * hifc_create_srq - Create SRQ, which contains four SRQ for receiving + * instant data and a SRQ for receiving ELS data. + * @v_hba: hba handler + * @Return: 0 - success, negative - failure + */ +static unsigned int hifc_create_srq(struct hifc_hba_s *v_hba) +{ + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, + return UNF_RETURN_ERROR); + + /* Create ELS SRQ */ + ret = hifc_create_els_srq(v_hba); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Create Els Srq failed"); + return UNF_RETURN_ERROR; + } + + return RETURN_OK; +} + +unsigned int hifc_create_common_share_queues(void *v_hba) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, + return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)v_hba; + + /* Create & Init 8 pairs SCQ */ + ret = hifc_create_scq(hba); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, "[err]Create scq failed"); + + return UNF_RETURN_ERROR; + } + + /* Alloc SRQ resource for SIRT & ELS */ + ret = hifc_create_srq(hba); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, "[err]Create srq failed"); + + hifc_flush_scq_ctx(hba); + hifc_destroy_scq(hba); + + return UNF_RETURN_ERROR; + } + + return RETURN_OK; +} + +void hifc_destroy_common_share_queues(void *v_hba) +{ + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, NULL != v_hba, return); + + hifc_destroy_scq((struct hifc_hba_s *)v_hba); + hifc_destroy_srq((struct hifc_hba_s *)v_hba); +} + +static unsigned char hifc_map_fcp_data_cos(struct hifc_hba_s *v_hba) +{ + unsigned char i = 0; + unsigned char min_cnt_index = HIFC_PACKET_COS_FC_DATA; + int get_init_index = UNF_FALSE; + + for (i = 0; i < HIFC_MAX_COS_NUM; i++) { + /* + * Check whether the CoS is valid for the FC and cannot be + * occupied by the CMD + */ + if ((!(v_hba->cos_bit_map & (1 << i))) || + (i == HIFC_PACKET_COS_FC_CMD)) { + continue; + } + + if (get_init_index == UNF_FALSE) { + min_cnt_index = i; + get_init_index = UNF_TRUE; + continue; + } + + if (atomic_read(&v_hba->cos_rport_cnt[i]) < + atomic_read(&v_hba->cos_rport_cnt[min_cnt_index])) { + min_cnt_index = i; + } + } + + atomic_inc(&v_hba->cos_rport_cnt[min_cnt_index]); + + return min_cnt_index; +} + +static void hifc_update_cos_rport_cnt(struct hifc_hba_s *v_hba, + unsigned char v_cos_index) +{ + if ((v_cos_index >= HIFC_MAX_COS_NUM) || + (v_cos_index == HIFC_PACKET_COS_FC_CMD) || + (!(v_hba->cos_bit_map & (1 << v_cos_index))) || + (atomic_read(&v_hba->cos_rport_cnt[v_cos_index]) == 0)) { + return; + } + + atomic_dec(&v_hba->cos_rport_cnt[v_cos_index]); +} + +void hifc_invalid_parent_sq(struct hifc_parent_sq_info_s *sq_info) +{ + sq_info->rport_index = INVALID_VALUE32; + sq_info->context_id = INVALID_VALUE32; + sq_info->sq_queue_id = INVALID_VALUE32; + sq_info->cache_id = INVALID_VALUE32; + sq_info->max_sqe_num = INVALID_VALUE32; + sq_info->wqe_num_per_buf = INVALID_VALUE32; + sq_info->wqe_size = HIFC_SCQE_SIZE; 
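+	/*
+	 * The assignments below continue resetting the remaining SQ
+	 * bookkeeping (MSN tracking, port IDs, OQIDs, DIF control and
+	 * per-queue statistics) to sentinel values, which should keep a
+	 * recycled SQ slot from being mistaken for a live session before
+	 * it is reallocated.
+	 */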
+ sq_info->wqe_offset = INVALID_VALUE32; + sq_info->head_start_cmsn = HIFC_MAX_MSN; + sq_info->head_end_cmsn = HIFC_MAX_MSN; + sq_info->last_pmsn = INVALID_VALUE16; + sq_info->last_pi_owner = INVALID_VALUE16; + sq_info->local_port_id = INVALID_VALUE32; + sq_info->remote_port_id = INVALID_VALUE32; + sq_info->phba = NULL; + sq_info->del_start_jiff = INVALID_VALUE64; + sq_info->port_in_flush = UNF_FALSE; + sq_info->sq_in_sess_rst = UNF_FALSE; + sq_info->oqid_rd = INVALID_VALUE16; + sq_info->oqid_wr = INVALID_VALUE16; + sq_info->srq_ctx_addr = 0; + atomic_set(&sq_info->sq_cashed, UNF_FALSE); + sq_info->vport_id = 0; + sq_info->sirt_dif_control.protect_opcode = UNF_DIF_ACTION_NONE; + atomic_set(&sq_info->sq_valid, UNF_FALSE); + atomic_set(&sq_info->fush_done_wait_cnt, 0); + + memset(&sq_info->delay_sqe, 0, + sizeof(struct hifc_delay_sqe_ctrl_info_s)); + memset(sq_info->io_stat, 0, sizeof(sq_info->io_stat)); +} + +static void hifc_free_link_list_wpg(struct hifc_parent_sq_info_s *v_sq) +{ + unsigned long flag = 0; + struct hifc_hba_s *hba = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + struct list_head *entry_head_wqe_page = NULL; + struct hifc_sq_wqe_page_s *sq_wpg = NULL; + + hba = (struct hifc_hba_s *)v_sq->phba; + + list_for_each_safe(node, next_node, &v_sq->list_linked_list_sq) { + sq_wpg = list_entry(node, struct hifc_sq_wqe_page_s, entry_wpg); + memset((void *)sq_wpg->wpg_addr, WQE_MARKER_0, + hba->sq_wpg_pool.wpg_size); + + spin_lock_irqsave(&hba->sq_wpg_pool.wpg_pool_lock, flag); + + entry_head_wqe_page = &sq_wpg->entry_wpg; + list_del(entry_head_wqe_page); + list_add_tail(entry_head_wqe_page, + &hba->sq_wpg_pool.list_free_wpg_pool); + + /* WqePage Pool counter */ + atomic_dec(&v_sq->wqe_page_cnt); + atomic_dec(&hba->sq_wpg_pool.wpg_in_use); + + spin_unlock_irqrestore(&hba->sq_wpg_pool.wpg_pool_lock, flag); + } + + HIFC_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_INFO, + "[info]Port(0x%x) RPort(0x%x) Sq(0x%x) link list destroyed, Sq.WqePageCnt=0x%x, SqWpgPool.wpg_in_use=0x%x", + hba->port_cfg.port_id, v_sq->rport_index, v_sq->context_id, + atomic_read(&v_sq->wqe_page_cnt), + atomic_read(&hba->sq_wpg_pool.wpg_in_use)); +} + +static void hifc_free_parent_sq(struct hifc_hba_s *v_hba, + struct hifc_parent_queue_info_s *v_parentq_info) +{ + unsigned int ctx_flush_done = 0; + unsigned int *ctx_dw = NULL; + struct hifc_parent_sq_info_s *sq_info = NULL; + unsigned int delay_cnt = 0; + + sq_info = &v_parentq_info->parent_sq_info; + + /* Free data cos */ + hifc_update_cos_rport_cnt(v_hba, v_parentq_info->queue_data_cos); + + hifc_free_link_list_wpg(sq_info); + + if (sq_info->queue_header_original) { + pci_unmap_single(v_hba->pci_dev, + sq_info->queue_hdr_phy_addr_original, + sizeof(struct hifc_queue_header_s) + + HIFC_SQ_HEADER_ADDR_ALIGN_SIZE, + DMA_BIDIRECTIONAL); + kfree(sq_info->queue_header_original); + sq_info->queue_header_original = NULL; + } + + if (v_parentq_info->parent_ctx.cqm_parent_ctx_obj) { + ctx_dw = (unsigned int *)((void *)( + v_parentq_info->parent_ctx.cqm_parent_ctx_obj->vaddr)); + ctx_flush_done = ctx_dw[HIFC_CTXT_FLUSH_DONE_DW_POS] & + HIFC_CTXT_FLUSH_DONE_MASK_BE; + mb(); + if ((v_parentq_info->offload_state == + HIFC_QUEUE_STATE_DESTROYING) && (ctx_flush_done == 0)) { + do { + ctx_flush_done = + ctx_dw[HIFC_CTXT_FLUSH_DONE_DW_POS] & + HIFC_CTXT_FLUSH_DONE_MASK_BE; + mb(); + if (ctx_flush_done != 0) + break; + delay_cnt++; + } while (delay_cnt < 100); + + if (ctx_flush_done == 0) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_REG_ATT, 
UNF_WARN, + "[warn]Port(0x%x) Rport(0x%x) flush done is not set", + v_hba->port_cfg.port_id, + sq_info->rport_index); + } + } + + cqm_object_delete( + &v_parentq_info->parent_ctx.cqm_parent_ctx_obj->object); + v_parentq_info->parent_ctx.cqm_parent_ctx_obj = NULL; + } + + hifc_invalid_parent_sq(sq_info); +} + +static inline struct hifcoe_sqe_s *hifc_get_wqe_page_entry( + struct hifc_sq_wqe_page_s *v_wpg, + unsigned int wqe_offset) +{ + struct hifcoe_sqe_s *wpg = NULL; + + wpg = (struct hifcoe_sqe_s *)(v_wpg->wpg_addr); + wpg += wqe_offset; + + return wpg; +} + +static struct hifc_sq_wqe_page_s *hifc_add_tail_wqe_page( + struct hifc_parent_sq_info_s *v_sq) +{ + struct hifc_hba_s *hba = NULL; + struct hifc_sq_wqe_page_s *esgl = NULL; + struct list_head *free_list_head = NULL; + unsigned long flag = 0; + + hba = (struct hifc_hba_s *)v_sq->phba; + + spin_lock_irqsave(&hba->sq_wpg_pool.wpg_pool_lock, flag); + + /* Get a WqePage from hba->sq_wpg_pool.list_free_wpg_pool, and add + * to v_sq.list_SqTailWqePage + */ + if (!list_empty(&hba->sq_wpg_pool.list_free_wpg_pool)) { + free_list_head = (&hba->sq_wpg_pool.list_free_wpg_pool)->next; + list_del(free_list_head); + list_add_tail(free_list_head, &v_sq->list_linked_list_sq); + esgl = list_entry(free_list_head, struct hifc_sq_wqe_page_s, + entry_wpg); + + /* WqePage Pool counter */ + atomic_inc(&hba->sq_wpg_pool.wpg_in_use); + } else { + esgl = NULL; + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]SQ pool is empty when SQ(0x%x) try to get wqe page", + v_sq->rport_index); + HIFC_HBA_STAT(hba, HIFC_STAT_SQ_POOL_EMPTY); + } + + spin_unlock_irqrestore(&hba->sq_wpg_pool.wpg_pool_lock, flag); + + return esgl; +} + +static struct hifc_sq_wqe_page_s *hifc_add_one_wqe_page( + struct hifc_parent_sq_info_s *v_sq) +{ + unsigned int wqe_idx = 0; + struct hifc_sq_wqe_page_s *wqe_page = NULL; + struct hifcoe_sqe_s *sqe_in_wp = NULL; + struct hifc_link_wqe_s *link_wqe_in_wpg = NULL; + struct hifc_link_wqe_s link_wqe; + + /* Add a new Wqe Page */ + wqe_page = hifc_add_tail_wqe_page(v_sq); + + if (!wqe_page) + return NULL; + + for (wqe_idx = 0; wqe_idx <= v_sq->wqe_num_per_buf; wqe_idx++) { + sqe_in_wp = hifc_get_wqe_page_entry(wqe_page, wqe_idx); + sqe_in_wp->ctrl_sl.ch.ctrl_ch_val = 0; + } + + /* Set last WqePage as linkwqe */ + link_wqe_in_wpg = (struct hifc_link_wqe_s *) + hifc_get_wqe_page_entry(wqe_page, v_sq->wqe_num_per_buf); + link_wqe.val_wd0 = 0; + link_wqe.val_wd1 = 0; + link_wqe.next_page_addr_hi = 0; + link_wqe.next_page_addr_lo = 0; + link_wqe.wd0.wf = CQM_WQE_WF_LINK; + link_wqe.wd0.ctrlsl = CQM_LINK_WQE_CTRLSL_VALUE; + link_wqe.wd0.o = !(v_sq->last_pi_owner); + link_wqe.wd1.lp = CQM_LINK_WQE_LP_INVALID; + hifc_cpu_to_big32(&link_wqe, sizeof(struct hifc_link_wqe_s)); + memcpy(link_wqe_in_wpg, &link_wqe, sizeof(struct hifc_link_wqe_s)); + + return wqe_page; +} + +static void hifc_alloc_sq_oqid(struct hifc_hba_s *v_hba, + struct hifc_parent_sq_info_s *v_sq) +{ + unsigned short read_oqid = INVALID_VALUE16; + unsigned short write_oqid = INVALID_VALUE16; + unsigned short vf_id = INVALID_VALUE16; + unsigned short mask_value = hifc_host_oq_id_mask(v_hba->hw_dev_handle); + unsigned int cqm_xid = v_sq->context_id; + + vf_id = hifc_global_func_id(v_hba->hw_dev_handle); + + HIFC_OQID_RD((unsigned short)cqm_xid, vf_id, mask_value, read_oqid); + HIFC_OQID_WR((unsigned short)cqm_xid, vf_id, mask_value, write_oqid); + + v_sq->oqid_rd = read_oqid; + v_sq->oqid_wr = write_oqid; +} + +static void hifc_parent_sq_operate_time_out(struct work_struct 
*work) +{ + int free_sq = UNF_FALSE; + unsigned long flag = 0; + struct hifc_parent_sq_info_s *parent_sq = NULL; + struct hifc_parent_queue_info_s *parent_queue = NULL; + struct hifc_hba_s *hba = NULL; + + HIFC_CHECK(INVALID_VALUE32, work, return); + + parent_sq = container_of(work, struct hifc_parent_sq_info_s, + del_work.work); + parent_queue = container_of(parent_sq, struct hifc_parent_queue_info_s, + parent_sq_info); + hba = (struct hifc_hba_s *)parent_sq->phba; + HIFC_CHECK(INVALID_VALUE32, hba, return); + + spin_lock_irqsave(&parent_queue->parent_queue_state_lock, flag); + if (parent_queue->offload_state == HIFC_QUEUE_STATE_DESTROYING) { + free_sq = UNF_TRUE; + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "Port(0x%x) sq rport index(0x%x) local nportid(0x%x),remote nportid(0x%x) reset timeout.", + hba->port_cfg.port_id, + parent_sq->rport_index, + parent_sq->local_port_id, + parent_sq->remote_port_id); + } + spin_unlock_irqrestore(&parent_queue->parent_queue_state_lock, flag); + + /* In the server scenario, if the connection deletion times out, you + * can only wait or perform the FLR operation on the port. If the FLR + * command is run, the fault diffusion mode will be used. + */ + if ((parent_queue->parent_sq_info.del_start_jiff > hba->reset_time) && + (parent_queue->parent_sq_info.del_start_jiff != INVALID_VALUE64) && + (hba->removing == UNF_FALSE)) { + /* There is nothing to do if session reset timeout */ + ; + } + + if (free_sq == UNF_TRUE) { + /* There is nothing to do if session reset timeout */ + ; + } +} + +static void hifc_parent_sq_wait_flush_done_time_out(struct work_struct *work) +{ + unsigned long flag = 0; + struct hifc_parent_sq_info_s *parent_sq = NULL; + struct hifc_parent_queue_info_s *parent_queue = NULL; + struct hifc_hba_s *hba = NULL; + unsigned int ctx_flush_done; + unsigned int *ctx_dw = NULL; + int ret; + + HIFC_CHECK(INVALID_VALUE32, work, return); + + parent_sq = container_of(work, struct hifc_parent_sq_info_s, + flush_done_tmo_work.work); + + HIFC_CHECK(INVALID_VALUE32, parent_sq, return); + + parent_queue = container_of(parent_sq, struct hifc_parent_queue_info_s, + parent_sq_info); + hba = (struct hifc_hba_s *)parent_sq->phba; + HIFC_CHECK(INVALID_VALUE32, hba, return); + HIFC_CHECK(INVALID_VALUE32, parent_queue, return); + + spin_lock_irqsave(&parent_queue->parent_queue_state_lock, flag); + + if (parent_queue->offload_state != HIFC_QUEUE_STATE_DESTROYING) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) sq rport index(0x%x) is not destroying status,offloadsts is %d", + hba->port_cfg.port_id, + parent_sq->rport_index, + parent_queue->offload_state); + spin_unlock_irqrestore(&parent_queue->parent_queue_state_lock, + flag); + return; + } + + if (parent_queue->parent_ctx.cqm_parent_ctx_obj) { + ctx_dw = (unsigned int *)((void *) + (parent_queue->parent_ctx.cqm_parent_ctx_obj->vaddr)); + ctx_flush_done = + ctx_dw[HIFC_CTXT_FLUSH_DONE_DW_POS] & + HIFC_CTXT_FLUSH_DONE_MASK_BE; + if (ctx_flush_done == 0) { + spin_unlock_irqrestore( + &parent_queue->parent_queue_state_lock, flag); + + if (atomic_read(&parent_queue->parent_sq_info.fush_done_wait_cnt) < HIFC_SQ_WAIT_FLUSH_DONE_TIMEOUT_CNT) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[info]Port(0x%x) sq rport index(0x%x) wait flush done timeout %d times", + hba->port_cfg.port_id, + parent_sq->rport_index, + atomic_read(&parent_queue->parent_sq_info.fush_done_wait_cnt)); + + 
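+	/*
+	 * The flush-done bit is still clear: bump the wait counter and
+	 * re-arm the delayed work below. Once the counter reaches
+	 * HIFC_SQ_WAIT_FLUSH_DONE_TIMEOUT_CNT, the else branch gives up
+	 * and deliberately leaves the SQ unfreed rather than releasing a
+	 * context the hardware may still be flushing.
+	 */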
atomic_inc(&parent_queue->parent_sq_info.fush_done_wait_cnt); + + /* Delay Free Sq info */ + ret = queue_delayed_work(hba->work_queue, + &parent_queue->parent_sq_info.flush_done_tmo_work, + (unsigned long)msecs_to_jiffies((unsigned int)HIFC_SQ_WAIT_FLUSH_DONE_TIMEOUT_MS)); + if (ret == (int)false) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) rport(0x%x) queue delayed work failed iret:%d", + hba->port_cfg.port_id, + parent_sq->rport_index, + ret); + HIFC_HBA_STAT(hba, HIFC_STAT_PARENT_SQ_QUEUE_DELAYED_WORK); + } + + return; + } else { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) sq rport index(0x%x) has wait flush done %d times,do not free sq", + hba->port_cfg.port_id, + parent_sq->rport_index, + atomic_read(&parent_queue->parent_sq_info.fush_done_wait_cnt)); + + HIFC_HBA_STAT(hba, HIFC_STAT_CTXT_FLUSH_DONE); + + return; + } + } + } + + spin_unlock_irqrestore(&parent_queue->parent_queue_state_lock, flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) sq rport index(0x%x) flush done bit is ok,free sq now", + hba->port_cfg.port_id, + parent_sq->rport_index); + + hifc_free_parent_queue_info(hba, parent_queue); +} + +unsigned int hifc_alloc_parent_sq( + struct hifc_hba_s *v_hba, + struct hifc_parent_queue_info_s *v_parentq_info, + struct unf_rport_info_s *v_rport_info) +{ + struct hifc_parent_sq_info_s *sq_ctrl = NULL; + struct hifc_sq_wqe_page_s *head_wpg = NULL; + struct cqm_qpc_mpt_s *prnt_ctx = NULL; + unsigned int queue_header_alloc_size = 0; + unsigned long flag = 0; + + /* Craete parent context via CQM */ + prnt_ctx = cqm_object_qpc_mpt_create(v_hba->hw_dev_handle, + CQM_OBJECT_SERVICE_CTX, + HIFC_CNTX_SIZE_256B, + v_parentq_info, + CQM_INDEX_INVALID); + if (!prnt_ctx) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Create parent context failed, CQM_INDEX is 0x%x", + CQM_INDEX_INVALID); + goto parent_create_fail; + } + v_parentq_info->parent_ctx.cqm_parent_ctx_obj = prnt_ctx; + + /* Initialize struct hifc_parent_sq_info_s */ + sq_ctrl = &v_parentq_info->parent_sq_info; + sq_ctrl->phba = (void *)v_hba; + sq_ctrl->rport_index = v_rport_info->rport_index; + sq_ctrl->context_id = prnt_ctx->xid; + sq_ctrl->sq_queue_id = HIFC_QID_SQ; + sq_ctrl->cache_id = INVALID_VALUE32; + sq_ctrl->max_sqe_num = v_hba->exit_count; + /* Reduce one Link Wqe */ + sq_ctrl->wqe_num_per_buf = v_hba->sq_wpg_pool.wqe_per_wpg - 1; + sq_ctrl->wqe_size = HIFC_SQE_SIZE; + sq_ctrl->wqe_offset = 0; + sq_ctrl->head_start_cmsn = 0; + sq_ctrl->head_end_cmsn = HIFC_GET_WP_END_CMSN(0, + sq_ctrl->wqe_num_per_buf); + sq_ctrl->last_pmsn = 0; + /* Linked List SQ Owner Bit 1 valid, 0 invalid */ + sq_ctrl->last_pi_owner = 1; + sq_ctrl->local_port_id = INVALID_VALUE32; + sq_ctrl->remote_port_id = INVALID_VALUE32; + sq_ctrl->sq_in_sess_rst = UNF_FALSE; + atomic_set(&sq_ctrl->sq_valid, UNF_TRUE); + sq_ctrl->del_start_jiff = INVALID_VALUE64; + sq_ctrl->service_type = HIFC_GET_SERVICE_TYPE(v_hba); + sq_ctrl->vport_id = 0; + sq_ctrl->sirt_dif_control.protect_opcode = UNF_DIF_ACTION_NONE; + hifc_alloc_sq_oqid(v_hba, sq_ctrl); + atomic_set(&sq_ctrl->fush_done_wait_cnt, 0); + + /* Check whether the HBA is in the Linkdown state. Note that + * offload_state must be in the non-FREE state. 
+ */ + spin_lock_irqsave(&v_hba->flush_state_lock, flag); + sq_ctrl->port_in_flush = v_hba->in_flushing; + spin_unlock_irqrestore(&v_hba->flush_state_lock, flag); + + INIT_LIST_HEAD(&sq_ctrl->list_linked_list_sq); + atomic_set(&sq_ctrl->wqe_page_cnt, 0); + atomic_set(&sq_ctrl->sq_dbl_cnt, 0); + atomic_set(&sq_ctrl->sqe_minus_cqe_cnt, 1); + atomic_set(&sq_ctrl->sq_wqe_cnt, 0); + atomic_set(&sq_ctrl->sq_cqe_cnt, 0); + memset(sq_ctrl->io_stat, 0, sizeof(sq_ctrl->io_stat)); + + INIT_DELAYED_WORK(&sq_ctrl->del_work, hifc_parent_sq_operate_time_out); + INIT_DELAYED_WORK(&sq_ctrl->flush_done_tmo_work, + hifc_parent_sq_wait_flush_done_time_out); + + memset(&sq_ctrl->delay_sqe, 0, + sizeof(struct hifc_delay_sqe_ctrl_info_s)); + + /* Allocate and initialize the Queue Header space. 64B alignment is + * required. Additional 64B is applied for alignment + */ + queue_header_alloc_size = sizeof(struct hifc_queue_header_s) + + HIFC_SQ_HEADER_ADDR_ALIGN_SIZE; + sq_ctrl->queue_header_original = kmalloc(queue_header_alloc_size, + GFP_ATOMIC); + if (!sq_ctrl->queue_header_original) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]RPort(0x%x) create SQ queue header failed", + v_rport_info->rport_index); + goto qheader_create_fail; + } + + memset((unsigned char *)sq_ctrl->queue_header_original, 0, + queue_header_alloc_size); + + sq_ctrl->queue_hdr_phy_addr_original = pci_map_single( + v_hba->pci_dev, + sq_ctrl->queue_header_original, + queue_header_alloc_size, + DMA_BIDIRECTIONAL); + + if (pci_dma_mapping_error(v_hba->pci_dev, + sq_ctrl->queue_hdr_phy_addr_original)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]RPort(0x%x) SQ queue header DMA mapping failed", + v_rport_info->rport_index); + goto qheader_dma_map_fail; + } + + /* Obtains the 64B alignment address */ + sq_ctrl->queue_header = (struct hifc_queue_header_s *) + HIFC_ADDR_64_ALIGN( + (unsigned long long) + (sq_ctrl->queue_header_original)); + sq_ctrl->queue_hdr_phy_addr = + HIFC_ADDR_64_ALIGN(sq_ctrl->queue_hdr_phy_addr_original); + + /* Each SQ is allocated with a Wqe Page by default. 
The WqePageCnt is + * incremented by one + */ + head_wpg = hifc_add_one_wqe_page(sq_ctrl); + if (!head_wpg) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]RPort(0x%x) create SQ first wqe page failed", + v_rport_info->rport_index); + goto headwpg_create_fail; + } + + atomic_inc(&sq_ctrl->wqe_page_cnt); + + return RETURN_OK; + +headwpg_create_fail: + pci_unmap_single(v_hba->pci_dev, sq_ctrl->queue_hdr_phy_addr_original, + queue_header_alloc_size, DMA_BIDIRECTIONAL); + +qheader_dma_map_fail: + kfree(sq_ctrl->queue_header_original); + sq_ctrl->queue_header_original = NULL; + +qheader_create_fail: + cqm_object_delete(&prnt_ctx->object); + +parent_create_fail: + v_parentq_info->parent_ctx.cqm_parent_ctx_obj = NULL; + + return UNF_RETURN_ERROR; +} + +static void hifc_init_prnt_ctx_sq_qinfo( + struct hifc_parent_queue_info_s *v_parent_qinfo) +{ + struct hifc_parent_sq_info_s *sq = NULL; + struct hifc_sq_wqe_page_s *head_wqe_page = NULL; + struct hifcoe_parent_context_s *ctx = NULL; + struct hifcoe_sq_qinfo_s *parent_sq_ctx = NULL; + struct hifc_queue_info_bus_s queue_bus; + + /* Obtains the Parent Context address */ + sq = &v_parent_qinfo->parent_sq_info; + ctx = (struct hifcoe_parent_context_s *)(void *) + (v_parent_qinfo->parent_ctx.virt_parent_ctx); + head_wqe_page = HIFC_GET_SQ_HEAD(sq); + + parent_sq_ctx = &ctx->sq_qinfo; + + /* The PMSN is updated by the host driver */ + parent_sq_ctx->pmsn_type = HIFC_PMSN_CI_TYPE_FROM_HOST; + + /* Indicates the value of O of the valid SQE in the current round of SQ. + * The value of Linked List SQ is always one, and the value of 0 is + * invalid. + */ + /* current valid o-bit */ + parent_sq_ctx->loop_o = HIFC_OWNER_DRIVER_PRODUCT; + + /* should be opposite from loop_o */ + parent_sq_ctx->cur_wqe_o = ~(parent_sq_ctx->loop_o); + + /* the first sqe's gpa */ + parent_sq_ctx->cur_sqe_gpa = head_wqe_page->wpg_phy_addr; + + /* Indicates the GPA of the Queue header that is initialized to the SQ + * in the Host memory. The value must be 16-byte aligned. + */ + parent_sq_ctx->pmsn_gpa = sq->queue_hdr_phy_addr; + if (wqe_pre_load != 0) + parent_sq_ctx->pmsn_gpa |= HIFC_SQ_LINK_PRE; + + /* + * This field is used to fill in the dmaattr_idx field of the ComboDMA. 
+ * The default value is 0 + */ + parent_sq_ctx->sqe_dmaattr_idx = HIFC_DMA_ATTR_OFST; + + /* + * This field is filled using the value of RO_SO in the SGL0 of + * the ComboDMA + */ + parent_sq_ctx->sq_so_ro = HIFC_PCIE_RELAXED_ORDERING; + + parent_sq_ctx->ring = HIFC_QUEUE_LINK_STYLE; + + /* This field is used to set the SGL0 field of the Child solicDMA */ + parent_sq_ctx->zerocopy_dmaattr_idx = HIFC_DMA_ATTR_OFST; + + parent_sq_ctx->zerocopy_so_ro = HIFC_PCIE_RELAXED_ORDERING; + + /* PCIe attribute information */ + parent_sq_ctx->pcie_template = HIFC_PCIE_TEMPLATE; + + memset(&queue_bus, 0, sizeof(struct hifc_queue_info_bus_s)); + /* bits 20 */ + queue_bus.bus[0] |= ((unsigned long long)(sq->context_id & 0xfffff)); + /* bits 6 */ + queue_bus.bus[0] |= + (((unsigned long long)(parent_sq_ctx->sqe_dmaattr_idx)) << 20); + /* bits 2 */ + queue_bus.bus[0] |= + (((unsigned long long)(parent_sq_ctx->sq_so_ro)) << 26); + /* bits 1 */ + queue_bus.bus[0] |= (((unsigned long long)(parent_sq_ctx->ring)) << 28); + /* bits 6 */ + queue_bus.bus[0] |= + (((unsigned long long)(parent_sq_ctx->zerocopy_dmaattr_idx)) + << 29); + /* bits 2 */ + queue_bus.bus[0] |= + (((unsigned long long)(parent_sq_ctx->zerocopy_so_ro)) << 35); + /* bits 6 */ + queue_bus.bus[0] |= + (((unsigned long long)(parent_sq_ctx->pcie_template)) << 37); + /* bits 21 */ + queue_bus.bus[0] |= + (((unsigned long long)(parent_sq_ctx->pmsn_gpa >> 4)) << 43); + /* bits 39 */ + queue_bus.bus[1] |= + ((unsigned long long)(parent_sq_ctx->pmsn_gpa >> 25)); + /* bits 1 */ + queue_bus.bus[1] |= + (((unsigned long long)(parent_sq_ctx->pmsn_type)) << 39); + + parent_sq_ctx->parity = + hifc_get_parity_value(queue_bus.bus, HIFC_SQC_BUS_ROW, + HIFC_SQC_BUS_COL); + + hifc_cpu_to_big64(parent_sq_ctx, sizeof(struct hifcoe_sq_qinfo_s)); +} + +static void hifc_init_parent_ctx_sqc_qinfo( + void *v_hba, + struct hifc_parent_queue_info_s *v_parent_qinfo) +{ + unsigned int resp_scqn = 0; + struct hifcoe_parent_context_s *ctx = NULL; + struct hifcoe_scq_qinfo_s *resp_parent_scq_ctx = NULL; + struct hifc_queue_info_bus_s queue_bus; + + /* + * Obtains the queue id of the scq returned by the CQM when the SCQ + * is created + */ + resp_scqn = v_parent_qinfo->parent_sts_scq_info.cqm_queue_id; + + /* Obtains the Parent Context address */ + ctx = (struct hifcoe_parent_context_s *) + (v_parent_qinfo->parent_ctx.virt_parent_ctx); + + resp_parent_scq_ctx = &ctx->resp_scq_qinfo; + resp_parent_scq_ctx->hw_scqc_config.info.rq_th2_preld_cache_num = + wqe_pre_load; + resp_parent_scq_ctx->hw_scqc_config.info.rq_th1_preld_cache_num = + wqe_pre_load; + resp_parent_scq_ctx->hw_scqc_config.info.rq_th0_preld_cache_num = + wqe_pre_load; + resp_parent_scq_ctx->hw_scqc_config.info.rq_min_preld_cache_num = + wqe_pre_load; + resp_parent_scq_ctx->hw_scqc_config.info.sq_th2_preld_cache_num = + wqe_pre_load; + resp_parent_scq_ctx->hw_scqc_config.info.sq_th1_preld_cache_num = + wqe_pre_load; + resp_parent_scq_ctx->hw_scqc_config.info.sq_th0_preld_cache_num = + wqe_pre_load; + resp_parent_scq_ctx->hw_scqc_config.info.sq_min_preld_cache_num = + wqe_pre_load; + resp_parent_scq_ctx->hw_scqc_config.info.scq_n = + (unsigned long long)resp_scqn; + resp_parent_scq_ctx->hw_scqc_config.info.parity = 0; + + memset(&queue_bus, 0, sizeof(struct hifc_queue_info_bus_s)); + queue_bus.bus[0] = resp_parent_scq_ctx->hw_scqc_config.pctxt_val1; + resp_parent_scq_ctx->hw_scqc_config.info.parity = + hifc_get_parity_value( + queue_bus.bus, + HIFC_HW_SCQC_BUS_ROW, + HIFC_HW_SCQC_BUS_COL); + + 
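+	/*
+	 * As with the SQ qinfo above, the context fields are packed into
+	 * the queue_bus words and hifc_get_parity_value() is run over
+	 * them, presumably so the hardware can detect a corrupted
+	 * context. For reference, the SQ-side packing fills bus[0] with
+	 * xid[19:0], sqe_dmaattr_idx[25:20], sq_so_ro[27:26], ring[28],
+	 * zerocopy_dmaattr_idx[34:29], zerocopy_so_ro[36:35],
+	 * pcie_template[42:37] and pmsn_gpa[63:43], and bus[1] with the
+	 * remaining pmsn_gpa bits [38:0] plus pmsn_type at bit 39.
+	 */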
hifc_cpu_to_big64(resp_parent_scq_ctx, + sizeof(struct hifcoe_scq_qinfo_s)); +} + +static void hifc_init_parent_ctx_srq_qinfo( + void *v_hba, + struct hifc_parent_queue_info_s *v_parent_qinfo) +{ + struct hifc_hba_s *hba = NULL; + struct hifcoe_parent_context_s *ctx = NULL; + struct cqm_queue_s *cqm_els_srq = NULL; + struct hifc_parent_sq_info_s *sq = NULL; + struct hifc_queue_info_bus_s queue_bus; + + /* Obtains the SQ address */ + sq = &v_parent_qinfo->parent_sq_info; + + /* Obtains the Parent Context address */ + ctx = (struct hifcoe_parent_context_s *) + (v_parent_qinfo->parent_ctx.virt_parent_ctx); + + hba = (struct hifc_hba_s *)v_hba; + cqm_els_srq = hba->els_srq_info.cqm_srq_info; + + /* Initialize the Parent SRQ INFO used when the ELS is received */ + ctx->els_srq_info.srqc_gpa = cqm_els_srq->q_ctx_paddr >> 4; + + memset(&queue_bus, 0, sizeof(struct hifc_queue_info_bus_s)); + queue_bus.bus[0] = ctx->els_srq_info.srqc_gpa; + ctx->els_srq_info.parity = hifc_get_parity_value( + queue_bus.bus, + HIFC_HW_SRQC_BUS_ROW, + HIFC_HW_SRQC_BUS_COL); + + hifc_cpu_to_big64(&ctx->els_srq_info, + sizeof(struct hifcoe_srq_qinfo_s)); + + ctx->imm_srq_info.srqc_gpa = 0; + sq->srq_ctx_addr = 0; +} + +static void hifc_init_parent_rsvd_qinfo( + struct hifc_parent_queue_info_s *v_parent_qinfo) +{ + struct hifcoe_parent_context_s *ctx = NULL; + struct hifcoe_hw_rsvd_queue_s *hw_rsvd_qinfo = NULL; + unsigned short max_seq = 0; + unsigned int each = 0, seq_index = 0; + + /* Obtains the Parent Context address */ + ctx = (struct hifcoe_parent_context_s *) + (v_parent_qinfo->parent_ctx.virt_parent_ctx); + hw_rsvd_qinfo = (struct hifcoe_hw_rsvd_queue_s *)&ctx->hw_rsvdq; + memset(hw_rsvd_qinfo->seq_id_bitmap, 0, + sizeof(hw_rsvd_qinfo->seq_id_bitmap)); + + max_seq = HIFC_HRQI_SEQ_ID_MAX; + + /* special set for sequence id 0, which is always kept by ucode for + * sending fcp-cmd + */ + hw_rsvd_qinfo->seq_id_bitmap[HIFC_HRQI_SEQ_SEPCIAL_ID] = 1; + seq_index = HIFC_HRQI_SEQ_SEPCIAL_ID - + (max_seq >> HIFC_HRQI_SEQ_INDEX_SHIFT); + + /* Set the unavailable mask to start from max + 1 */ + for (each = (max_seq % HIFC_HRQI_SEQ_INDEX_MAX) + 1; + each < HIFC_HRQI_SEQ_INDEX_MAX; each++) { + hw_rsvd_qinfo->seq_id_bitmap[seq_index] |= 0x1 << each; + } + + hw_rsvd_qinfo->seq_id_bitmap[seq_index] = + cpu_to_be64(hw_rsvd_qinfo->seq_id_bitmap[seq_index]); + + /* sepcial set for sequence id 0 */ + if (seq_index != HIFC_HRQI_SEQ_SEPCIAL_ID) { + hw_rsvd_qinfo->seq_id_bitmap[HIFC_HRQI_SEQ_SEPCIAL_ID] = + cpu_to_be64( + hw_rsvd_qinfo->seq_id_bitmap[HIFC_HRQI_SEQ_SEPCIAL_ID]); + } + + for (each = 0; each < seq_index; each++) + hw_rsvd_qinfo->seq_id_bitmap[each] = HIFC_HRQI_SEQ_INVALID_ID; + + /* no matter what the range of seq id, last_req_seq_id is fixed + * value 0xff + */ + hw_rsvd_qinfo->wd0.last_req_seq_id = HIFC_HRQI_SEQ_ID_MAX; + hw_rsvd_qinfo->wd0.xid = v_parent_qinfo->parent_sq_info.context_id; + + *(unsigned long long *)&hw_rsvd_qinfo->wd0 = + cpu_to_be64(*(unsigned long long *)&hw_rsvd_qinfo->wd0); +} + +static void hifc_init_oqid_in_ctx( + struct hifcoe_parent_context_s *v_parent_ctx, + struct hifc_parent_queue_info_s *v_parent_qinfo) +{ + v_parent_ctx->sw_section.oqid_rd = + cpu_to_be16(v_parent_qinfo->parent_sq_info.oqid_rd); + v_parent_ctx->sw_section.oqid_wr = + cpu_to_be16(v_parent_qinfo->parent_sq_info.oqid_wr); +} + +static void hifc_init_parent_sw_section_info( + void *v_hba, + struct hifc_parent_queue_info_s *v_parent_qinfo) +{ +#define HIFC_VLAN_ENABLE (1) + + unsigned short rport_index; + struct hifc_hba_s *hba 
= NULL; + struct hifcoe_parent_context_s *ctx = NULL; + struct hifcoe_sw_section_s *sw_section = NULL; + + /* Obtains the Parent Context address */ + hba = (struct hifc_hba_s *)v_hba; + ctx = (struct hifcoe_parent_context_s *) + (v_parent_qinfo->parent_ctx.virt_parent_ctx); + sw_section = &ctx->sw_section; + + /* xid+vPortId */ + sw_section->sw_ctxt_vport_xid.xid = + v_parent_qinfo->parent_sq_info.context_id; + sw_section->sw_ctxt_vport_xid.vport = + v_parent_qinfo->parent_sq_info.vport_id; + sw_section->sw_ctxt_vport_xid.csctrl = 0; + hifc_cpu_to_big32(&sw_section->sw_ctxt_vport_xid, + sizeof(sw_section->sw_ctxt_vport_xid)); + + /* conn_id */ + rport_index = HIFC_LSW(v_parent_qinfo->parent_sq_info.rport_index); + sw_section->conn_id = cpu_to_be16(rport_index); + + /* Immediate parameters */ + sw_section->immi_rq_page_size = 0; + + /* Parent SCQ INFO used for sending packets to the Cmnd */ + sw_section->scq_num_rcv_cmd = + cpu_to_be32(v_parent_qinfo->parent_cmd_scq_info.cqm_queue_id); + + /* sw_ctxt_misc */ + sw_section->sw_ctxt_misc.dw.srv_type = + v_parent_qinfo->parent_sq_info.service_type; + sw_section->sw_ctxt_misc.dw.port_id = hba->port_index; + + /* only the VN2VF mode is supported */ + sw_section->sw_ctxt_misc.dw.vlan_id = 0; + hifc_cpu_to_big32(&sw_section->sw_ctxt_misc.pctxt_val0, + sizeof(sw_section->sw_ctxt_misc.pctxt_val0)); + + /* oqid_rd, oqid_wr */ + hifc_init_oqid_in_ctx(ctx, v_parent_qinfo); + + /* Configuring the combo length */ + sw_section->per_xmit_data_size = cpu_to_be32(combo_length_kb * 1024); + + /* sw_ctxt_config */ + sw_section->sw_ctxt_config.dw.work_mode = HIFC_PORT_MODE_INI; + + sw_section->sw_ctxt_config.dw.status = FCOE_PARENT_STATUS_INVALID; + sw_section->sw_ctxt_config.dw.cos = hba->port_index; + sw_section->sw_ctxt_config.dw.oq_cos_cmd = HIFC_PACKET_COS_FC_CMD; + sw_section->sw_ctxt_config.dw.oq_cos_data = + v_parent_qinfo->queue_data_cos; + sw_section->sw_ctxt_config.dw.priority = 0; + sw_section->sw_ctxt_config.dw.vlan_enable = HIFC_VLAN_ENABLE; + sw_section->sw_ctxt_config.dw.sgl_num = dif_sgl_mode; + hifc_cpu_to_big32(&sw_section->sw_ctxt_config.pctxt_val1, + sizeof(sw_section->sw_ctxt_config.pctxt_val1)); + + hifc_cpu_to_big32(&sw_section->immi_dif_info, + sizeof(sw_section->immi_dif_info)); + + sw_section->cmd_scq_gpa_h = + HIFC_HIGH_32_BITS(hba->scq_info[v_parent_qinfo->parent_cmd_scq_info.local_queue_id].cqm_scq_info->q_header_paddr); + sw_section->cmd_scq_gpa_l = + HIFC_LOW_32_BITS(hba->scq_info[v_parent_qinfo->parent_cmd_scq_info.local_queue_id].cqm_scq_info->q_header_paddr); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Port(0x%x) RPort(0x%x) CmdLocalScqn(0x%x) QheaderGpaH(0x%x) QheaderGpaL(0x%x)", + hba->port_cfg.port_id, + v_parent_qinfo->parent_sq_info.rport_index, + v_parent_qinfo->parent_cmd_scq_info.local_queue_id, + sw_section->cmd_scq_gpa_h, + sw_section->cmd_scq_gpa_l); + + hifc_cpu_to_big32(&sw_section->cmd_scq_gpa_h, + sizeof(sw_section->cmd_scq_gpa_h)); + hifc_cpu_to_big32(&sw_section->cmd_scq_gpa_l, + sizeof(sw_section->cmd_scq_gpa_l)); +} + +void hifc_init_parent_ctx(void *v_hba, + struct hifc_parent_queue_info_s *v_parent_qinfo) +{ + struct hifcoe_parent_context_s *ctx = NULL; + + ctx = (struct hifcoe_parent_context_s *) + (v_parent_qinfo->parent_ctx.virt_parent_ctx); + + /* Initialize Parent Context */ + memset(ctx, 0, HIFC_CNTX_SIZE_256B); + + /* Initialize the Queue Info hardware area */ + hifc_init_prnt_ctx_sq_qinfo(v_parent_qinfo); + hifc_init_parent_ctx_sqc_qinfo(v_hba, v_parent_qinfo); + 
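+	/*
+	 * The remaining helpers fill in the ELS SRQ info, the
+	 * reserved-queue sequence-id bitmap and the software section.
+	 * Each helper byte-swaps its portion of the 256B parent context
+	 * to big endian, which appears to be the byte order the chip
+	 * expects.
+	 */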
hifc_init_parent_ctx_srq_qinfo(v_hba, v_parent_qinfo); + hifc_init_parent_rsvd_qinfo(v_parent_qinfo); + + /* Initialize Software Section */ + hifc_init_parent_sw_section_info(v_hba, v_parent_qinfo); +} + +unsigned int hifc_get_rport_maped_cmd_scqn(void *phba, unsigned int rport_index) +{ + unsigned int cmd_scqn_local = 0; + struct hifc_hba_s *hba = (struct hifc_hba_s *)phba; + + cmd_scqn_local = HIFC_RPORTID_TO_CMD_SCQN(rport_index); + + return hba->scq_info[cmd_scqn_local].scqn; +} + +/** + * hifc_get_rport_maped_sts_scqn - Obtains the SCQ channel of RPort that is used + * to send STS. + * @v_hba: hba handle + * @rport_index: rport index + * @Return: related scqn value with rport index + */ +unsigned int hifc_get_rport_maped_sts_scqn(void *phba, unsigned int rport_index) +{ + unsigned int sts_scqn_local = 0; + struct hifc_hba_s *hba = (struct hifc_hba_s *)phba; + + sts_scqn_local = HIFC_RPORTID_TO_STS_SCQN(rport_index); + + return hba->scq_info[sts_scqn_local].scqn; +} + +void hifc_map_shared_queue_qid( + struct hifc_hba_s *v_hba, + struct hifc_parent_queue_info_s *v_parent_queue_info, + unsigned int rport_index) +{ + unsigned int cmd_scqn_local = 0; + unsigned int sts_scqn_local = 0; + + /* The SCQ is used for each connection based on the balanced + * distribution of commands and responses + */ + cmd_scqn_local = HIFC_RPORTID_TO_CMD_SCQN(rport_index); + sts_scqn_local = HIFC_RPORTID_TO_STS_SCQN(rport_index); + v_parent_queue_info->parent_cmd_scq_info.local_queue_id = + cmd_scqn_local; + v_parent_queue_info->parent_sts_scq_info.local_queue_id = + sts_scqn_local; + v_parent_queue_info->parent_cmd_scq_info.cqm_queue_id = + v_hba->scq_info[cmd_scqn_local].scqn; + v_parent_queue_info->parent_sts_scq_info.cqm_queue_id = + v_hba->scq_info[sts_scqn_local].scqn; + + /* Each session share with immediate SRQ and ElsSRQ */ + v_parent_queue_info->parent_els_srq_info.local_queue_id = 0; + v_parent_queue_info->parent_els_srq_info.cqm_queue_id = + v_hba->els_srq_info.srqn; + + /* Allocate fcp data cos value */ + v_parent_queue_info->queue_data_cos = hifc_map_fcp_data_cos(v_hba); + + /* Allocate Parent SQ vPort */ + v_parent_queue_info->parent_sq_info.vport_id += + v_parent_queue_info->queue_vport_id; +} + +unsigned int hifc_alloc_parent_resource(void *v_hba, + struct unf_rport_info_s *v_rport_info) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct hifc_hba_s *hba = NULL; + struct hifc_parent_queue_info_s *v_parent_queue_info = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_rport_info, + return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)v_hba; + + if (!hba->parent_queue_mgr) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) cannot find parent queue pool", + hba->port_cfg.port_id); + + return UNF_RETURN_ERROR; + } + + if (v_rport_info->rport_index >= UNF_HIFC_MAXRPORT_NUM) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) allocate parent resource failed, invlaid rport index(0x%x),rport nportid(0x%x)", + hba->port_cfg.port_id, + v_rport_info->rport_index, + v_rport_info->nport_id); + + return UNF_RETURN_ERROR; + } + + v_parent_queue_info = + &hba->parent_queue_mgr->parent_queues[v_rport_info->rport_index]; + + spin_lock_irqsave(&v_parent_queue_info->parent_queue_state_lock, flag); + + if (v_parent_queue_info->offload_state != HIFC_QUEUE_STATE_FREE) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + 
"[err]Port(0x%x) allocate parent resource failed, invlaid rport index(0x%x),rport nportid(0x%x), offload state(0x%x)", + hba->port_cfg.port_id, + v_rport_info->rport_index, + v_rport_info->nport_id, + v_parent_queue_info->offload_state); + + spin_unlock_irqrestore( + &v_parent_queue_info->parent_queue_state_lock, + flag); + return UNF_RETURN_ERROR; + } + + v_parent_queue_info->offload_state = HIFC_QUEUE_STATE_INITIALIZED; + + /* Create Parent Context and Link List SQ */ + ret = hifc_alloc_parent_sq(hba, v_parent_queue_info, v_rport_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "Port(0x%x) alloc sq resoure failed.rport index(0x%x),rport nportid(0x%x).", + hba->port_cfg.port_id, v_rport_info->rport_index, + v_rport_info->nport_id); + + v_parent_queue_info->offload_state = HIFC_QUEUE_STATE_FREE; + hifc_invalid_parent_sq(&v_parent_queue_info->parent_sq_info); + spin_unlock_irqrestore( + &v_parent_queue_info->parent_queue_state_lock, + flag); + + return UNF_RETURN_ERROR; + } + + /* Allocate the corresponding queue xid to each parent */ + hifc_map_shared_queue_qid(hba, v_parent_queue_info, + v_rport_info->rport_index); + + /* Initialize Parent Context, including hardware area and ucode area */ + hifc_init_parent_ctx(v_hba, v_parent_queue_info); + + spin_unlock_irqrestore(&v_parent_queue_info->parent_queue_state_lock, + flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) allocate parent sq success,rport index(0x%x),rport nportid(0x%x),context id(0x%x)", + hba->port_cfg.port_id, + v_rport_info->rport_index, + v_rport_info->nport_id, + v_parent_queue_info->parent_sq_info.context_id); + + return ret; +} + +unsigned int hifc_free_parent_resource(void *v_hba, + struct unf_rport_info_s *v_rport_info) +{ + struct hifc_hba_s *hba = NULL; + struct hifc_parent_queue_info_s *v_parent_queue_info = NULL; + unsigned long flag = 0; + unsigned long rst_flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + enum hifc_session_reset_mode_e mode = + HIFC_SESS_RST_DELETE_IO_CONN_BOTH; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_rport_info, + return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)v_hba; + if (!hba->parent_queue_mgr) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[warn]Port(0x%x) cannot find parent queue pool", + hba->port_cfg.port_id); + + return UNF_RETURN_ERROR; + } + + /* get parent queue info (by rport index) */ + if (v_rport_info->rport_index >= UNF_HIFC_MAXRPORT_NUM) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[warn]Port(0x%x) free parent resource failed, invlaid rport_index(%u) rport_nport_id(0x%x)", + hba->port_cfg.port_id, + v_rport_info->rport_index, + v_rport_info->nport_id); + + return UNF_RETURN_ERROR; + } + v_parent_queue_info = &hba->parent_queue_mgr->parent_queues[v_rport_info->rport_index]; + + spin_lock_irqsave(&v_parent_queue_info->parent_queue_state_lock, flag); + + /* 1. 
for has been offload */ + if (v_parent_queue_info->offload_state == HIFC_QUEUE_STATE_OFFLOADED) { + v_parent_queue_info->offload_state = + HIFC_QUEUE_STATE_DESTROYING; + spin_unlock_irqrestore( + &v_parent_queue_info->parent_queue_state_lock, + flag); + + /* set reset state, in order to prevent I/O in_SQ */ + spin_lock_irqsave( + &v_parent_queue_info->parent_sq_info.parent_sq_enqueue_lock, + rst_flag); + v_parent_queue_info->parent_sq_info.sq_in_sess_rst = UNF_TRUE; + spin_unlock_irqrestore( + &v_parent_queue_info->parent_sq_info.parent_sq_enqueue_lock, + rst_flag); + + /* check pcie device state */ + if (HIFC_HBA_NOT_PRESENT(hba)) { + HIFC_TRACE( + UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port(0x%x) hba is not present, free directly. rport_index(0x%x:0x%x) local_nportid(0x%x) remote_nportid(0x%x:0x%x)", + hba->port_cfg.port_id, + v_rport_info->rport_index, + v_parent_queue_info->parent_sq_info.rport_index, + v_parent_queue_info->parent_sq_info.local_port_id, + v_rport_info->nport_id, + v_parent_queue_info->parent_sq_info.remote_port_id); + + hifc_free_parent_queue_info(hba, v_parent_queue_info); + return RETURN_OK; + } + + v_parent_queue_info->parent_sq_info.del_start_jiff = jiffies; + (void)queue_delayed_work( + hba->work_queue, + &v_parent_queue_info->parent_sq_info.del_work, + (unsigned long) + msecs_to_jiffies((unsigned int) + HIFC_SQ_DEL_STAGE_TIMEOUT_MS)); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) begin to reset parent session, rport_index(0x%x:0x%x) local_nportid(0x%x) remote_nportid(0x%x:0x%x)", + hba->port_cfg.port_id, + v_rport_info->rport_index, + v_parent_queue_info->parent_sq_info.rport_index, + v_parent_queue_info->parent_sq_info.local_port_id, + v_rport_info->nport_id, + v_parent_queue_info->parent_sq_info.remote_port_id); + + /* Forcibly set both mode */ + mode = HIFC_SESS_RST_DELETE_IO_CONN_BOTH; + ret = hifc_send_session_rst_cmd(v_hba, v_parent_queue_info, + mode); + + return ret; + } else if (v_parent_queue_info->offload_state == + HIFC_QUEUE_STATE_INITIALIZED) { + /* 2. for resource has been alloc, but not offload */ + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) parent sq is not offloaded, free directly. rport_index(0x%x:0x%x) local_nportid(0x%x) remote_nportid(0x%x:0x%x)", + hba->port_cfg.port_id, + v_rport_info->rport_index, + v_parent_queue_info->parent_sq_info.rport_index, + v_parent_queue_info->parent_sq_info.local_port_id, + v_rport_info->nport_id, + v_parent_queue_info->parent_sq_info.remote_port_id); + + spin_unlock_irqrestore( + &v_parent_queue_info->parent_queue_state_lock, + flag); + hifc_free_parent_queue_info(hba, v_parent_queue_info); + + return RETURN_OK; + } else if (v_parent_queue_info->offload_state == + HIFC_QUEUE_STATE_OFFLOADING) { + /* 3. for driver has offloading CMND to uCode */ + hifc_push_destroy_parent_queue_sqe(v_hba, + v_parent_queue_info, + v_rport_info); + spin_unlock_irqrestore( + &v_parent_queue_info->parent_queue_state_lock, + flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) parent sq is offloading, push to delay free. 
rport_index(0x%x:0x%x) local_nportid(0x%x) remote_nportid(0x%x:0x%x)", + hba->port_cfg.port_id, + v_rport_info->rport_index, + v_parent_queue_info->parent_sq_info.rport_index, + v_parent_queue_info->parent_sq_info.local_port_id, + v_rport_info->nport_id, + v_parent_queue_info->parent_sq_info.remote_port_id); + + return RETURN_OK; + } else { + /* other state */ + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) parent sq is not created, do not need free state(0x%x) rport_index(0x%x:0x%x) local_nportid(0x%x) remote_nportid(0x%x:0x%x)", + hba->port_cfg.port_id, + v_parent_queue_info->offload_state, + v_rport_info->rport_index, + v_parent_queue_info->parent_sq_info.rport_index, + v_parent_queue_info->parent_sq_info.local_port_id, + v_rport_info->nport_id, + v_parent_queue_info->parent_sq_info.remote_port_id); + + spin_unlock_irqrestore( + &v_parent_queue_info->parent_queue_state_lock, + flag); + + return RETURN_OK; + } +} + +void hifc_free_parent_queue_mgr(void *v_hba) +{ + struct hifc_hba_s *hba = NULL; + unsigned int index = 0; + struct hifc_parent_queue_mgr_s *parent_queue_mgr; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return); + hba = (struct hifc_hba_s *)v_hba; + + if (!hba->parent_queue_mgr) + return; + parent_queue_mgr = hba->parent_queue_mgr; + + for (index = 0; index < UNF_HIFC_MAXRPORT_NUM; index++) { + if (parent_queue_mgr->parent_queues[index].parent_ctx.virt_parent_ctx) + parent_queue_mgr->parent_queues[index].parent_ctx.virt_parent_ctx = NULL; + } + + if (parent_queue_mgr->parent_sq_buf_list.buflist) { + for (index = 0; + index < parent_queue_mgr->parent_sq_buf_list.buf_num; + index++) { + if (parent_queue_mgr->parent_sq_buf_list.buflist[index].paddr != 0) { + pci_unmap_single( + hba->pci_dev, + parent_queue_mgr->parent_sq_buf_list.buflist[index].paddr, + parent_queue_mgr->parent_sq_buf_list.buf_size, + DMA_BIDIRECTIONAL); + parent_queue_mgr->parent_sq_buf_list.buflist[index].paddr = 0; + } + if (parent_queue_mgr->parent_sq_buf_list.buflist[index].vaddr) { + kfree(parent_queue_mgr->parent_sq_buf_list.buflist[index].vaddr); + parent_queue_mgr->parent_sq_buf_list.buflist[index].vaddr = NULL; + } + } + + kfree(parent_queue_mgr->parent_sq_buf_list.buflist); + parent_queue_mgr->parent_sq_buf_list.buflist = NULL; + } + + vfree(parent_queue_mgr); + hba->parent_queue_mgr = NULL; +} + +void hifc_free_parent_queues(void *v_hba) +{ + unsigned int index = 0; + unsigned long flag = 0; + struct hifc_parent_queue_mgr_s *parent_queue_mgr = NULL; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return); + hba = (struct hifc_hba_s *)v_hba; + parent_queue_mgr = hba->parent_queue_mgr; + + for (index = 0; index < UNF_HIFC_MAXRPORT_NUM; index++) { + spin_lock_irqsave(&parent_queue_mgr->parent_queues[index].parent_queue_state_lock, flag); + + if (parent_queue_mgr->parent_queues[index].offload_state == + HIFC_QUEUE_STATE_DESTROYING) { + spin_unlock_irqrestore( + &parent_queue_mgr->parent_queues[index].parent_queue_state_lock, + flag); + + (void)cancel_delayed_work_sync(&parent_queue_mgr->parent_queues[index].parent_sq_info.del_work); + (void)cancel_delayed_work_sync(&parent_queue_mgr->parent_queues[index].parent_sq_info.flush_done_tmo_work); + + /* free parent queue */ + hifc_free_parent_queue_info( + hba, + &parent_queue_mgr->parent_queues[index]); + continue; + } + + spin_unlock_irqrestore(&parent_queue_mgr->parent_queues[index].parent_queue_state_lock, flag); + } +} + +unsigned int hifc_alloc_parent_queue_mgr(void 
*v_hba) +{ + unsigned int index = 0; + struct hifc_parent_queue_mgr_s *parent_queue_mgr = NULL; + struct hifc_hba_s *hba = NULL; + unsigned int buf_total_size; + unsigned int buf_num; + unsigned int alloc_idx; + unsigned int cur_buf_idx = 0; + unsigned int cur_buf_offset = 0; + unsigned int uiprtctxsize = sizeof(struct hifcoe_parent_context_s); + unsigned int buf_cnt_perhugebuf; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, + return UNF_RETURN_ERROR); + + hba = (struct hifc_hba_s *)v_hba; + + parent_queue_mgr = (struct hifc_parent_queue_mgr_s *)vmalloc( + sizeof(struct hifc_parent_queue_mgr_s)); + if (!parent_queue_mgr) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) cannot allocate queue manager", + hba->port_cfg.port_id); + + return UNF_RETURN_ERROR; + } + + hba->parent_queue_mgr = parent_queue_mgr; + memset(parent_queue_mgr, 0, sizeof(struct hifc_parent_queue_mgr_s)); + + for (index = 0; index < UNF_HIFC_MAXRPORT_NUM; index++) { + spin_lock_init(&parent_queue_mgr->parent_queues[index].parent_queue_state_lock); + parent_queue_mgr->parent_queues[index].offload_state = + HIFC_QUEUE_STATE_FREE; + parent_queue_mgr->parent_queues[index].parent_sq_info.queue_header_original = NULL; + spin_lock_init(&parent_queue_mgr->parent_queues[index].parent_sq_info.parent_sq_enqueue_lock); + parent_queue_mgr->parent_queues[index].parent_cmd_scq_info.cqm_queue_id = INVALID_VALUE32; + parent_queue_mgr->parent_queues[index].parent_sts_scq_info.cqm_queue_id = INVALID_VALUE32; + parent_queue_mgr->parent_queues[index].parent_els_srq_info.cqm_queue_id = INVALID_VALUE32; + parent_queue_mgr->parent_queues[index].parent_sq_info.del_start_jiff = INVALID_VALUE64; + parent_queue_mgr->parent_queues[index].queue_vport_id = + hba->vpid_start; + } + + buf_total_size = uiprtctxsize * UNF_HIFC_MAXRPORT_NUM; + parent_queue_mgr->parent_sq_buf_list.buf_size = + buf_total_size > BUF_LIST_PAGE_SIZE ? BUF_LIST_PAGE_SIZE : + buf_total_size; + buf_cnt_perhugebuf = + parent_queue_mgr->parent_sq_buf_list.buf_size / uiprtctxsize; + buf_num = + UNF_HIFC_MAXRPORT_NUM % buf_cnt_perhugebuf ? 
+ UNF_HIFC_MAXRPORT_NUM / buf_cnt_perhugebuf + 1 : + UNF_HIFC_MAXRPORT_NUM / buf_cnt_perhugebuf; + parent_queue_mgr->parent_sq_buf_list.buflist = (struct buff_list_s *) + kmalloc(buf_num * sizeof(struct buff_list_s), + GFP_KERNEL); + parent_queue_mgr->parent_sq_buf_list.buf_num = buf_num; + + if (!parent_queue_mgr->parent_sq_buf_list.buflist) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Allocate QueuMgr buf list failed out of memory"); + goto free_parent_queue; + } + memset(parent_queue_mgr->parent_sq_buf_list.buflist, 0, + buf_num * sizeof(struct buff_list_s)); + + for (alloc_idx = 0; alloc_idx < buf_num; alloc_idx++) { + parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].vaddr = + kmalloc(parent_queue_mgr->parent_sq_buf_list.buf_size, + GFP_KERNEL); + if (!parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].vaddr) + goto free_parent_queue; + memset( + parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].vaddr, + 0, parent_queue_mgr->parent_sq_buf_list.buf_size); + + parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].paddr = + pci_map_single( + hba->pci_dev, + parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].vaddr, + parent_queue_mgr->parent_sq_buf_list.buf_size, + DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error( + hba->pci_dev, + parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].paddr)) { + parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].paddr = 0; + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, "[err]Map QueuMgr address failed"); + + goto free_parent_queue; + } + } + + for (index = 0; index < UNF_HIFC_MAXRPORT_NUM; index++) { + cur_buf_idx = index / buf_cnt_perhugebuf; + cur_buf_offset = uiprtctxsize * (index % buf_cnt_perhugebuf); + + parent_queue_mgr->parent_queues[index].parent_ctx.virt_parent_ctx = parent_queue_mgr->parent_sq_buf_list.buflist[cur_buf_idx].vaddr + cur_buf_offset; + parent_queue_mgr->parent_queues[index].parent_ctx.parent_ctx = parent_queue_mgr->parent_sq_buf_list.buflist[cur_buf_idx].paddr + cur_buf_offset; + } + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[EVENT]Allocate bufnum:%u,buf_total_size:%u", buf_num, + buf_total_size); + + return RETURN_OK; + +free_parent_queue: + hifc_free_parent_queue_mgr(hba); + return UNF_RETURN_ERROR; +} + +static void hifc_release_all_wqe_pages(struct hifc_hba_s *v_hba) +{ + unsigned int index; + struct hifc_sq_wqe_page_s *wpg = NULL; + + UNF_CHECK_VALID(0x2218, UNF_TRUE, v_hba, return); + + wpg = v_hba->sq_wpg_pool.wpg_pool_addr; + + for (index = 0; index < v_hba->sq_wpg_pool.wpg_cnt; index++) { + if (wpg->wpg_addr) { + dma_pool_free(v_hba->sq_wpg_pool.wpg_dma_pool, + wpg->wpg_addr, wpg->wpg_phy_addr); + wpg->wpg_addr = NULL; + wpg->wpg_phy_addr = 0; + } + + wpg++; + } + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port[%u] free total %u wqepages", v_hba->port_index, + index); +} + +unsigned int hifc_alloc_parent_sq_wqe_page_pool(void *v_hba) +{ + unsigned int index = 0; + struct hifc_sq_wqe_page_pool_s *wpg_pool = NULL; + struct hifc_sq_wqe_page_s *wpg = NULL; + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_hba; + wpg_pool = &hba->sq_wpg_pool; + + INIT_LIST_HEAD(&wpg_pool->list_free_wpg_pool); + spin_lock_init(&wpg_pool->wpg_pool_lock); + atomic_set(&wpg_pool->wpg_in_use, 0); + + /* Calculate the number of Wqe Page required in the pool */ + wpg_pool->wpg_size = wqe_page_size; + wpg_pool->wpg_cnt = (HIFC_MIN_WP_NUM * hba->image_count + + ((hba->exit_count * HIFC_SQE_SIZE) / + 
wpg_pool->wpg_size)); + + wpg_pool->wqe_per_wpg = wpg_pool->wpg_size / HIFC_SQE_SIZE; + + /* Create DMA pool */ + wpg_pool->wpg_dma_pool = dma_pool_create("hifc_wpg_pool", + &hba->pci_dev->dev, + wpg_pool->wpg_size, + HIFC_SQE_SIZE, 0); + if (!wpg_pool->wpg_dma_pool) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Cannot allocate SQ WqePage DMA pool"); + + goto out_create_dma_pool_err; + } + + /* Allocate arrays to record all WqePage addresses */ + wpg_pool->wpg_pool_addr = + (struct hifc_sq_wqe_page_s *) + vmalloc(wpg_pool->wpg_cnt * sizeof(struct hifc_sq_wqe_page_s)); + if (!wpg_pool->wpg_pool_addr) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Allocate SQ WqePageAddr array failed"); + + goto out_alloc_wpg_array_err; + } + wpg = wpg_pool->wpg_pool_addr; + memset(wpg, 0, wpg_pool->wpg_cnt * sizeof(struct hifc_sq_wqe_page_s)); + + for (index = 0; index < wpg_pool->wpg_cnt; index++) { + /* Allocate a WqePage from the DMA pool */ + wpg->wpg_addr = dma_pool_alloc(wpg_pool->wpg_dma_pool, + GFP_KERNEL, + (u64 *)&wpg->wpg_phy_addr); + if (!wpg->wpg_addr) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, "[err]Dma pool allocation failed"); + + break; + } + + /* To ensure security, clear the memory */ + memset(wpg->wpg_addr, 0, wpg_pool->wpg_size); + + /* Add to the idle linked list */ + INIT_LIST_HEAD(&wpg->entry_wpg); + list_add_tail(&wpg->entry_wpg, + &wpg_pool->list_free_wpg_pool); + + wpg++; + } + /* All pages allocated successfully */ + if (index == wpg_pool->wpg_cnt) + return RETURN_OK; + + hifc_release_all_wqe_pages(hba); + vfree(wpg_pool->wpg_pool_addr); + wpg_pool->wpg_pool_addr = NULL; + +out_alloc_wpg_array_err: + dma_pool_destroy(wpg_pool->wpg_dma_pool); + wpg_pool->wpg_dma_pool = NULL; + +out_create_dma_pool_err: + return UNF_RETURN_ERROR; +} + +void hifc_free_parent_sq_wqe_page_pool(void *v_hba) +{ + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(0x2220, UNF_TRUE, v_hba, return); + hba = (struct hifc_hba_s *)v_hba; + + hifc_release_all_wqe_pages(hba); + hba->sq_wpg_pool.wpg_cnt = 0; + + if (hba->sq_wpg_pool.wpg_pool_addr) { + vfree(hba->sq_wpg_pool.wpg_pool_addr); + hba->sq_wpg_pool.wpg_pool_addr = NULL; + } + + if (hba->sq_wpg_pool.wpg_dma_pool) { + dma_pool_destroy(hba->sq_wpg_pool.wpg_dma_pool); + hba->sq_wpg_pool.wpg_dma_pool = NULL; + } +} + +static inline void hifc_set_sq_wqe_owner_be(void *v_sqe) +{ + unsigned int *sqe_dw = (unsigned int *)v_sqe; + + /* Ensure that the write of WQE is complete */ + mb(); + sqe_dw[HIFC_SQE_SECOND_OBIT_DW_POS] |= HIFC_SQE_OBIT_SET_MASK_BE; + + /* Ensure that the write of Second Obit is complete */ + mb(); + sqe_dw[HIFC_SQE_FIRST_OBIT_DW_POS] |= HIFC_SQE_OBIT_SET_MASK_BE; +} + +static void hifc_free_head_wqe_page(struct hifc_parent_sq_info_s *v_sq) +{ + struct hifc_hba_s *hba = NULL; + struct hifc_sq_wqe_page_s *sq_wpg = NULL; + struct list_head *entry_head_wqe_page = NULL; + unsigned long flag = 0; + + atomic_dec(&v_sq->wqe_page_cnt); + + hba = (struct hifc_hba_s *)v_sq->phba; + sq_wpg = HIFC_GET_SQ_HEAD(v_sq); + memset((void *)sq_wpg->wpg_addr, WQE_MARKER_0, + hba->sq_wpg_pool.wpg_size); + + spin_lock_irqsave(&hba->sq_wpg_pool.wpg_pool_lock, flag); + entry_head_wqe_page = &sq_wpg->entry_wpg; + list_del(entry_head_wqe_page); + list_add_tail(entry_head_wqe_page, + &hba->sq_wpg_pool.list_free_wpg_pool); + + /* WqePage Pool counter */ + atomic_dec(&hba->sq_wpg_pool.wpg_in_use); + + spin_unlock_irqrestore(&hba->sq_wpg_pool.wpg_pool_lock, flag); +} + +static unsigned int
hifc_parent_sq_ring_door_bell( + struct hifc_parent_sq_info_s *v_sq) +{ + unsigned int ret = RETURN_OK; + int ravl; + unsigned short pmsn; + unsigned char pmsn_lo; + unsigned char pmsn_hi; + unsigned long long db_val_qw; + struct hifc_hba_s *hba; + struct hifc_parent_sq_db_s door_bell; + + hba = (struct hifc_hba_s *)v_sq->phba; + pmsn = v_sq->last_pmsn; + /* Obtain the low 8 Bit of PMSN */ + pmsn_lo = (unsigned char)(pmsn & 0xFF); + /* Obtain the high 8 Bit of PMSN */ + pmsn_hi = (unsigned char)((pmsn >> 8) & 0xFF); + door_bell.wd0.service_type = HIFC_LSW(v_sq->service_type); + door_bell.wd0.cos = hba->port_index; + door_bell.wd0.c = 0; + door_bell.wd0.arm = HIFC_DB_ARM_DISABLE; + door_bell.wd0.cntx_size = HIFC_CNTX_SIZE_T_256B; + door_bell.wd0.vport = v_sq->vport_id; + door_bell.wd0.xid = v_sq->context_id; + door_bell.wd1.sm_data = v_sq->cache_id; + door_bell.wd1.qid = v_sq->sq_queue_id; + door_bell.wd1.pi_hi = (unsigned int)pmsn_hi; + + if (unlikely(v_sq->cache_id == INVALID_VALUE32)) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) SQ(0x%x) send DB error invalid cachedid", + hba->port_cfg.port_id, v_sq->context_id); + HIFC_HBA_STAT(hba, HIFC_STAT_PARENT_SQ_INVALID_CACHED_ID); + return UNF_RETURN_ERROR; + } + + /* Fill Doorbell Record */ + db_val_qw = v_sq->queue_header->doorbell_record; + db_val_qw &= (unsigned long long)(~(0xFFFFFFFF)); + db_val_qw |= (unsigned long long)((unsigned long long)pmsn << 16 | + pmsn); + v_sq->queue_header->doorbell_record = cpu_to_be64(db_val_qw); + + /* ring doorbell */ + db_val_qw = *(unsigned long long *)&door_bell; + hifc_cpu_to_big32(&db_val_qw, sizeof(db_val_qw)); + + ravl = cqm_ring_hardware_db(hba->hw_dev_handle, SERVICE_T_FC, pmsn_lo, + db_val_qw); + if (unlikely(ravl != CQM_SUCCESS)) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]SQ(0x%x) send DB(0x%llx) failed", + v_sq->context_id, db_val_qw); + + ret = UNF_RETURN_ERROR; + } + + /* Doorbell success counter */ + atomic_inc(&v_sq->sq_dbl_cnt); + + return ret; +} + +unsigned int hifc_parent_sq_enqueue(struct hifc_parent_sq_info_s *v_sq, + struct hifcoe_sqe_s *v_io_sqe) +{ + unsigned char wqe_type = 0; + unsigned int ret = RETURN_OK; + unsigned int addr_wd = INVALID_VALUE32; + unsigned int msn_wd = INVALID_VALUE32; + unsigned short link_wqe_msn = 0; + unsigned long flag = 0; + struct hifc_sq_wqe_page_s *new_wqe_page = NULL; + struct hifc_sq_wqe_page_s *tail_wpg = NULL; + struct hifcoe_sqe_s *sqe_in_wp = NULL; + struct hifc_link_wqe_s *link_wqe = NULL; + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_sq->phba; + + wqe_type = (unsigned char)HIFC_GET_WQE_TYPE(v_io_sqe); + + /* Serial enqueue */ + spin_lock_irqsave(&v_sq->parent_sq_enqueue_lock, flag); + + /* If the SQ is invalid, the wqe is discarded */ + if (unlikely(!atomic_read(&v_sq->sq_valid))) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]SQ is invalid, reject wqe(0x%x)", wqe_type); + + spin_unlock_irqrestore(&v_sq->parent_sq_enqueue_lock, flag); + + return UNF_RETURN_ERROR; + } + + /* + * The heartbeat detection status is 0, which allows control sessions + * enqueuing + */ + if (unlikely((!hba->heart_status) && HIFC_WQE_IS_IO(v_io_sqe))) { + spin_unlock_irqrestore(&v_sq->parent_sq_enqueue_lock, flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[err]Heart status is false"); + + return UNF_RETURN_ERROR; + } + + /* Ensure to be offloaded */ + if (unlikely(atomic_read(&v_sq->sq_cashed) != UNF_TRUE)) { + HIFC_ERR_IO_STAT((struct hifc_hba_s *)v_sq->phba, 
wqe_type); + HIFC_HBA_STAT((struct hifc_hba_s *)v_sq->phba, + HIFC_STAT_PARENT_SQ_NOT_OFFLOADED); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[err]RPort(0x%x) Sq(0x%x) is not offloaded, reject wqe(0x%x)", + v_sq->rport_index, v_sq->context_id, wqe_type); + + spin_unlock_irqrestore(&v_sq->parent_sq_enqueue_lock, flag); + + return UNF_RETURN_ERROR; + } + + /* + * Whether the SQ is in the flush state. Temporarily allow the control + * sessions to enqueue. + */ + if (unlikely(v_sq->port_in_flush && HIFC_WQE_IS_IO(v_io_sqe))) { + HIFC_ERR_IO_STAT((struct hifc_hba_s *)v_sq->phba, wqe_type); + HIFC_HBA_STAT((struct hifc_hba_s *)v_sq->phba, + HIFC_STAT_PARENT_IO_FLUSHED); + + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]SQ(0x%x) in flush, cmsn(0x%x)-pmsn(0x%x), reject wqe(0x%x)", + v_sq->context_id, + HIFC_GET_QUEUE_CMSN(v_sq), + v_sq->last_pmsn, wqe_type); + + spin_unlock_irqrestore(&v_sq->parent_sq_enqueue_lock, flag); + + return UNF_RETURN_ERROR; + } + + /* + * If the SQ is in the Session deletion state and is the WQE of the + * I/O path, the I/O failure is directly returned + */ + if (unlikely(v_sq->sq_in_sess_rst && HIFC_WQE_IS_IO(v_io_sqe))) { + HIFC_ERR_IO_STAT((struct hifc_hba_s *)v_sq->phba, wqe_type); + HIFC_HBA_STAT((struct hifc_hba_s *)v_sq->phba, + HIFC_STAT_PARENT_IO_FLUSHED); + + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]SQ(0x%x) in session reset, reject wqe(0x%x)", + v_sq->context_id, wqe_type); + + spin_unlock_irqrestore(&v_sq->parent_sq_enqueue_lock, flag); + + return UNF_RETURN_ERROR; + } + + /* + * If the next SQE slot (PMSN position) is the one reserved for the + * LinkWqe, a new WQE page must be requested from the CQM + */ + tail_wpg = HIFC_GET_SQ_TAIL(v_sq); + + if (v_sq->wqe_offset == v_sq->wqe_num_per_buf) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_INFO, + "[info]RPort(0x%x) Sq(0x%x) add wqepage at pmsn(0x%x), WpgCnt(0x%x)", + v_sq->rport_index, v_sq->context_id, v_sq->last_pmsn, + atomic_read(&v_sq->wqe_page_cnt)); + + /* Add a new Wqe Page */ + new_wqe_page = hifc_add_one_wqe_page(v_sq); + if (unlikely(!new_wqe_page)) { + HIFC_ERR_IO_STAT((struct hifc_hba_s *)v_sq->phba, + wqe_type); + spin_unlock_irqrestore(&v_sq->parent_sq_enqueue_lock, + flag); + + return UNF_RETURN_ERROR; + } + + /* + * Set the next address of LinkWqe to the newly allocated WqePage + */ + link_wqe = (struct hifc_link_wqe_s *) + hifc_get_wqe_page_entry(tail_wpg, v_sq->wqe_offset); + addr_wd = HIFC_MSD(new_wqe_page->wpg_phy_addr); + link_wqe->next_page_addr_hi = cpu_to_be32(addr_wd); + addr_wd = HIFC_LSD(new_wqe_page->wpg_phy_addr); + link_wqe->next_page_addr_lo = cpu_to_be32(addr_wd); + + /* Fill LinkWqe msn */ + link_wqe_msn = HIFC_MSN_DEC(v_sq->last_pmsn); + msn_wd = be32_to_cpu(link_wqe->val_wd1); + msn_wd |= ((unsigned int)(link_wqe_msn & 0xffff)); + msn_wd |= (((unsigned int)(link_wqe_msn & 0x7fff)) << 16); + link_wqe->val_wd1 = cpu_to_be32(msn_wd); + + /* Set LinkWqe's Owner Bit valid */ + hifc_set_sq_wqe_owner_be(link_wqe); + + /* The newly added WqePage starts from 0 */ + v_sq->wqe_offset = 0; + + /* Point to the tail, Link Wqe */ + tail_wpg = HIFC_GET_SQ_TAIL(v_sq); + + /* Update counter */ + atomic_inc(&v_sq->wqe_page_cnt); + } + + /* Set pmsn of WQE Control Section, and set Owner-Bit invalid */ + hifc_build_wqe_owner_pmsn(&v_io_sqe->ctrl_sl, !v_sq->last_pi_owner, + v_sq->last_pmsn); + + /* Port WQE send counter */ + HIFC_IO_STAT((struct hifc_hba_s *)v_sq->phba, wqe_type); + + /* + * Set Done Bit of WQE, convert Control and Task
Section to big endian + */ + hifc_convert_parent_wqe_to_big_endian(v_io_sqe); + + /* + * Find the position of the pointer that the SQE is placed in the + * WQEPAGE + */ + sqe_in_wp = (struct hifcoe_sqe_s *) + hifc_get_wqe_page_entry(tail_wpg, v_sq->wqe_offset); + + /* Copy sqe from the local memory to WqePage */ + memcpy(sqe_in_wp, v_io_sqe, sizeof(struct hifcoe_sqe_s)); + + hifc_set_sq_wqe_owner_be(sqe_in_wp); + + /* ring DoorBell */ + ret = hifc_parent_sq_ring_door_bell(v_sq); + if (unlikely(ret != RETURN_OK)) + HIFC_ERR_IO_STAT((struct hifc_hba_s *)v_sq->phba, wqe_type); + + /* Update the count of the next SQE enqueuing */ + v_sq->wqe_offset += 1; + v_sq->last_pmsn = HIFC_MSN_INC(v_sq->last_pmsn); + + /* sq_wqe_cnt is updated for SQ statistics */ + atomic_inc(&v_sq->sq_wqe_cnt); + atomic_inc(&v_sq->sqe_minus_cqe_cnt); + HIFC_SQ_IO_STAT(v_sq, wqe_type); + spin_unlock_irqrestore(&v_sq->parent_sq_enqueue_lock, flag); + + return ret; +} + +static int hifc_msn_in_wqe_page(unsigned int start_msn, unsigned int end_msn, + unsigned int cur_msn) +{ + int ret = UNF_TRUE; + + if (end_msn >= start_msn) { + if ((cur_msn < start_msn) || (cur_msn > end_msn)) + ret = UNF_FALSE; + else + ret = UNF_TRUE; + + } else { + if ((cur_msn > end_msn) && (cur_msn < start_msn)) + ret = UNF_FALSE; + else + ret = UNF_TRUE; + } + + return ret; +} + +void hifc_free_sq_wqe_page(struct hifc_parent_sq_info_s *v_sq, + unsigned int cur_msn) +{ + unsigned short wpg_start_cmsn = 0; + unsigned short wpg_end_cmsn = 0; + int wqe_page_in_use; + + /* If there is only zero or one Wqe Page, no release is required */ + if (atomic_read(&v_sq->wqe_page_cnt) <= HIFC_MIN_WP_NUM) + return; + + /* + * Check whether the current MSN is within the MSN range covered + * by the WqePage + */ + wpg_start_cmsn = v_sq->head_start_cmsn; + wpg_end_cmsn = v_sq->head_end_cmsn; + wqe_page_in_use = hifc_msn_in_wqe_page(wpg_start_cmsn, + wpg_end_cmsn, cur_msn); + + /* + * If the value of CMSN is within the current Wqe Page, no release is + * required + */ + if (wqe_page_in_use == UNF_TRUE) + return; + /* Free WqePage */ + hifc_free_head_wqe_page(v_sq); + + /* Obtain the start MSN of the next WqePage */ + wpg_start_cmsn = HIFC_MSN_INC(wpg_end_cmsn); + + /* obtain the end MSN of the next WqePage */ + wpg_end_cmsn = HIFC_GET_WP_END_CMSN(wpg_start_cmsn, + v_sq->wqe_num_per_buf); + + /* Set new MSN range */ + v_sq->head_start_cmsn = wpg_start_cmsn; + v_sq->head_end_cmsn = wpg_end_cmsn; +} + +static void hifc_update_sq_wqe_completion_stat( + struct hifc_parent_sq_info_s *v_sq, + union hifcoe_scqe_u *v_scqe) +{ + struct hifcoe_scqe_rcv_els_gs_rsp_s *els_gs_rsp = NULL; + + els_gs_rsp = (struct hifcoe_scqe_rcv_els_gs_rsp_s *)v_scqe; + + /* + * For the ELS/GS RSP intermediate frame and the CQE that is more + * than the ELS_GS_RSP_EXCH_CHECK_FAIL, no statistics are required + */ + if (unlikely(HIFC_GET_SCQE_TYPE(v_scqe) == HIFC_SCQE_ELS_RSP) || + (HIFC_GET_SCQE_TYPE(v_scqe) == HIFC_SCQE_GS_RSP)) { + if (!els_gs_rsp->wd3.end_rsp || !HIFC_SCQE_ERR_TO_CM(v_scqe)) + return; + } + + /* + * When the SQ statistics are updated, the PlogiAcc or PlogiAccSts + * that is implicitly unloaded will enter here, and one more CQE count + * is added + */ + atomic_inc(&v_sq->sq_cqe_cnt); + atomic_dec(&v_sq->sqe_minus_cqe_cnt); + HIFC_SQ_IO_STAT(v_sq, HIFC_GET_SCQE_TYPE(v_scqe)); +} + +unsigned int hifc_reclaim_sq_wqe_page(void *v_hba, union hifcoe_scqe_u *v_scqe) +{ + unsigned int cur_msn = 0; + unsigned int rport_index = INVALID_VALUE32; + struct hifc_hba_s *hba = NULL; + struct 
hifc_parent_sq_info_s *sq = NULL; + struct hifc_parent_queue_info_s *v_parent_queue_info = NULL; + unsigned long state_lock_flag = 0; + + hba = (struct hifc_hba_s *)v_hba; + rport_index = HIFC_GET_SCQE_CONN_ID(v_scqe); + if (rport_index >= UNF_HIFC_MAXRPORT_NUM) { + HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) do not have rport index: 0x%x", + hba->port_cfg.port_id, rport_index); + + return UNF_RETURN_ERROR; + } + + v_parent_queue_info = + &hba->parent_queue_mgr->parent_queues[rport_index]; + sq = &v_parent_queue_info->parent_sq_info; + /* If there is only zero or one Wqe Page, no release is required */ + if (atomic_read(&sq->wqe_page_cnt) <= HIFC_MIN_WP_NUM) { + hifc_update_sq_wqe_completion_stat(sq, v_scqe); + return RETURN_OK; + } else { + spin_lock_irqsave( + &v_parent_queue_info->parent_queue_state_lock, + state_lock_flag); + + if (v_parent_queue_info->offload_state == + HIFC_QUEUE_STATE_FREE) { + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) RPort(0x%x) already released, no need to reclaim sq wqepage", + hba->port_cfg.port_id, rport_index); + spin_unlock_irqrestore( + &v_parent_queue_info->parent_queue_state_lock, + state_lock_flag); + + return RETURN_OK; + } + + cur_msn = HIFC_GET_QUEUE_CMSN(sq); + hifc_free_sq_wqe_page(sq, cur_msn); + hifc_update_sq_wqe_completion_stat(sq, v_scqe); + + spin_unlock_irqrestore( + &v_parent_queue_info->parent_queue_state_lock, + state_lock_flag); + + return RETURN_OK; + } +} + +struct hifc_parent_queue_info_s *hifc_find_parent_queue_info_by_pkg( + void *v_hba, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned int rport_index = 0; + struct hifc_parent_queue_info_s *v_parent_queue_info = NULL; + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_hba; + rport_index = v_pkg->private[PKG_PRIVATE_XCHG_RPORT_INDEX]; + + if (unlikely(rport_index >= UNF_HIFC_MAXRPORT_NUM)) { + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_MAJOR, + "[warn]Port(0x%x) send pkg sid_did(0x%x_0x%x), but uplevel allocate invalid rport index: 0x%x", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did, rport_index); + + return NULL; + } + + /* parent -->> session */ + v_parent_queue_info = + &hba->parent_queue_mgr->parent_queues[rport_index]; + + return v_parent_queue_info; +} + +struct hifc_parent_queue_info_s *hifc_find_parent_queue_info_by_id( + struct hifc_hba_s *v_hba, + unsigned int v_local_id, + unsigned int v_remote_id) +{ + unsigned int index = 0; + unsigned long flag = 0; + struct hifc_parent_queue_mgr_s *parent_queue_mgr = NULL; + struct hifc_parent_queue_info_s *v_parent_queue_info = NULL; + + parent_queue_mgr = v_hba->parent_queue_mgr; + if (!parent_queue_mgr) + return NULL; + + /* rport_number -->> parent_number -->> session_number */ + for (index = 0; index < UNF_HIFC_MAXRPORT_NUM; index++) { + spin_lock_irqsave(&parent_queue_mgr->parent_queues[index].parent_queue_state_lock, flag); + + /* local_id & remote_id & offload */ + if ((v_local_id == parent_queue_mgr->parent_queues[index].parent_sq_info.local_port_id) && + (v_remote_id == parent_queue_mgr->parent_queues[index].parent_sq_info.remote_port_id) && + (parent_queue_mgr->parent_queues[index].offload_state == + HIFC_QUEUE_STATE_OFFLOADED)) { + v_parent_queue_info = + &parent_queue_mgr->parent_queues[index]; + spin_unlock_irqrestore(&parent_queue_mgr->parent_queues[index].parent_queue_state_lock, flag); + + return v_parent_queue_info; + } + + 
spin_unlock_irqrestore(&parent_queue_mgr->parent_queues[index].parent_queue_state_lock, flag); + } + + return NULL; +} + +struct hifc_parent_queue_info_s *hifc_find_offload_parent_queue( + void *v_hba, + unsigned int v_local_id, + unsigned int v_remote_id, + unsigned int v_rport_index) +{ + unsigned int index = 0; + unsigned long flag = 0; + struct hifc_hba_s *hba = v_hba; + struct hifc_parent_queue_mgr_s *parent_queue_mgr = NULL; + struct hifc_parent_queue_info_s *v_parent_queue_info = NULL; + + parent_queue_mgr = hba->parent_queue_mgr; + if (!parent_queue_mgr) + return NULL; + + for (index = 0; index < UNF_HIFC_MAXRPORT_NUM; index++) { + if (index == v_rport_index) + continue; + + spin_lock_irqsave(&parent_queue_mgr->parent_queues[index].parent_queue_state_lock, flag); + + if ((v_local_id == parent_queue_mgr->parent_queues[index].parent_sq_info.local_port_id) && + (v_remote_id == parent_queue_mgr->parent_queues[index].parent_sq_info.remote_port_id) && + (parent_queue_mgr->parent_queues[index].offload_state != + HIFC_QUEUE_STATE_FREE) && + (parent_queue_mgr->parent_queues[index].offload_state != + HIFC_QUEUE_STATE_INITIALIZED)) { + v_parent_queue_info = + &parent_queue_mgr->parent_queues[index]; + spin_unlock_irqrestore( + &parent_queue_mgr->parent_queues[index].parent_queue_state_lock, flag); + + return v_parent_queue_info; + } + + spin_unlock_irqrestore(&parent_queue_mgr->parent_queues[index].parent_queue_state_lock, flag); + } + + return NULL; +} + +struct hifc_parent_sq_info_s *hifc_find_parent_sq_by_pkg( + void *v_hba, + struct unf_frame_pkg_s *v_pkg) +{ + struct hifc_parent_queue_info_s *v_parent_queue_info = NULL; + struct cqm_qpc_mpt_s *cqm_parent_ctx_obj = NULL; + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_hba; + + v_parent_queue_info = hifc_find_parent_queue_info_by_pkg(hba, v_pkg); + if (unlikely(!v_parent_queue_info)) { + v_parent_queue_info = hifc_find_parent_queue_info_by_id( + hba, + v_pkg->frame_head.csctl_sid & + UNF_NPORTID_MASK, + v_pkg->frame_head.rctl_did & + UNF_NPORTID_MASK); + if (!v_parent_queue_info) { + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[err]Port(0x%x) send pkg sid_did(0x%x_0x%x), get a null parent queue information", + hba->port_cfg.port_id, + v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return NULL; + } + } + + cqm_parent_ctx_obj = v_parent_queue_info->parent_ctx.cqm_parent_ctx_obj; + if (unlikely(!cqm_parent_ctx_obj)) { + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[err]Port(0x%x) send pkg sid_did(0x%x_0x%x) with this rport has not alloc parent sq information", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return NULL; + } + + return &v_parent_queue_info->parent_sq_info; +} + +struct hifc_parent_ctx_s *hifc_get_parnt_ctx_virt_addr_by_pkg( + void *v_hba, + struct unf_frame_pkg_s *v_pkg) +{ + struct hifc_parent_queue_info_s *v_parent_queue_info = NULL; + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_hba; + v_parent_queue_info = hifc_find_parent_queue_info_by_pkg(hba, v_pkg); + if (!v_parent_queue_info) { + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[err]Port(0x%x) send pkg sid_did(0x%x_0x%x), get a null parent queue information", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return NULL; + } + + if ((!v_parent_queue_info->parent_ctx.cqm_parent_ctx_obj) || + (!v_parent_queue_info->parent_ctx.virt_parent_ctx)) { + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, 
UNF_WARN, + "[err]Port(0x%x) send pkg sid_did(0x%x_0x%x), but this rport have not allocate a parent context yet", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return NULL; + } + + if (!v_parent_queue_info->parent_ctx.cqm_parent_ctx_obj->vaddr) { + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[err]Port(0x%x) send pkg sid_did(0x%x_0x%x), but cqm have not allocate a parent context yet", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return NULL; + } + + return &v_parent_queue_info->parent_ctx; +} + +unsigned int hifc_check_all_parent_queue_free(struct hifc_hba_s *v_hba) +{ + unsigned int index = 0; + unsigned long flag = 0; + struct hifc_parent_queue_mgr_s *parent_queue_mgr = NULL; + + parent_queue_mgr = v_hba->parent_queue_mgr; + if (!parent_queue_mgr) { + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[err]Port(0x%x) get a null parent queue mgr", + v_hba->port_cfg.port_id); + + return UNF_RETURN_ERROR; + } + + for (index = 0; index < UNF_HIFC_MAXRPORT_NUM; index++) { + spin_lock_irqsave( + &parent_queue_mgr->parent_queues[index].parent_queue_state_lock, + flag); + + if (parent_queue_mgr->parent_queues[index].offload_state != + HIFC_QUEUE_STATE_FREE) { + spin_unlock_irqrestore(&parent_queue_mgr->parent_queues[index].parent_queue_state_lock, flag); + return UNF_RETURN_ERROR; + } + + spin_unlock_irqrestore(&parent_queue_mgr->parent_queues[index].parent_queue_state_lock, flag); + } + + return RETURN_OK; +} + +unsigned int hifc_get_parent_ctx_xid_by_pkg(void *v_hba, + struct unf_frame_pkg_s *v_pkg) +{ + struct hifc_parent_queue_info_s *v_parent_queue_info = NULL; + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_hba; + + v_parent_queue_info = hifc_find_parent_queue_info_by_pkg(hba, v_pkg); + if (!v_parent_queue_info) { + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[err]Port(0x%x) send pkg sid_did(0x%x_0x%x), get a null parent queue information", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return INVALID_VALUE32; + } + + if ((!v_parent_queue_info->parent_ctx.cqm_parent_ctx_obj) || + (!v_parent_queue_info->parent_ctx.cqm_parent_ctx_obj->vaddr)) { + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[err]Port(0x%x) send pkg sid_did(0x%x_0x%x),but this rport have not allocate a parent context yet", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return INVALID_VALUE32; + } + + return v_parent_queue_info->parent_ctx.cqm_parent_ctx_obj->xid; +} + +static void hifc_flush_specific_scq(struct hifc_hba_s *v_hba, + unsigned int index) +{ + /* + * The software interrupt is scheduled and processed during the second + * timeout period + */ + struct hifc_scq_info_s *scq_info = NULL; + unsigned int flush_done_time = 0; + + scq_info = &v_hba->scq_info[index]; + atomic_set(&scq_info->flush_state, HIFC_QUEUE_FLUSH_DOING); + tasklet_schedule(&scq_info->tasklet); + + /* + * Wait for a maximum of 2 seconds. 
If the SCQ soft interrupt is not + * scheduled within 2 seconds, only timeout is returned + */ + while ((atomic_read(&scq_info->flush_state) != HIFC_QUEUE_FLUSH_DONE) && + (flush_done_time < HIFC_QUEUE_FLUSH_WAIT_TIMEOUT_MS)) { + msleep(HIFC_QUEUE_FLUSH_WAIT_MS); + flush_done_time += HIFC_QUEUE_FLUSH_WAIT_MS; + tasklet_schedule(&scq_info->tasklet); + } + + if (atomic_read(&scq_info->flush_state) != HIFC_QUEUE_FLUSH_DONE) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_WARN, + "[warn]Port(0x%x) special scq(0x%x) flush timeout", + v_hba->port_cfg.port_id, index); + } +} + +static void hifc_flush_cmd_scq(struct hifc_hba_s *v_hba) +{ + unsigned int index = 0; + + for (index = HIFC_CMD_SCQN_START; index < HIFC_SESSION_SCQ_NUM; + index += HIFC_SCQS_PER_SESSION) + hifc_flush_specific_scq(v_hba, index); +} + +static void hifc_flush_sts_scq(struct hifc_hba_s *v_hba) +{ + unsigned int index = 0; + + /* for each STS SCQ */ + for (index = HIFC_STS_SCQN_START; index < HIFC_SESSION_SCQ_NUM; + index += HIFC_SCQS_PER_SESSION) + hifc_flush_specific_scq(v_hba, index); +} + +static void hifc_flush_all_scq(struct hifc_hba_s *v_hba) +{ + hifc_flush_cmd_scq(v_hba); + hifc_flush_sts_scq(v_hba); + /* Flush Default SCQ */ + hifc_flush_specific_scq(v_hba, HIFC_SESSION_SCQ_NUM); +} + +static void hifc_wait_root_rq_empty(struct hifc_hba_s *v_hba) +{ + unsigned int q_index; + struct hifc_root_info_s *root_info; + struct hifc_root_rq_info_s *rq_info; + unsigned int flush_done_time = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return); + + root_info = &v_hba->root_info; + + for (q_index = 0; q_index < root_info->rq_num; q_index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + q_index; + atomic_set(&rq_info->flush_state, HIFC_QUEUE_FLUSH_DOING); + flush_done_time = 0; + + while ((atomic_read(&rq_info->flush_state) != + HIFC_QUEUE_FLUSH_DONE) && + (flush_done_time < HIFC_QUEUE_FLUSH_WAIT_TIMEOUT_MS)) { + msleep(HIFC_QUEUE_FLUSH_WAIT_MS); + flush_done_time += HIFC_QUEUE_FLUSH_WAIT_MS; + tasklet_schedule(&rq_info->tasklet); + } + + if (atomic_read(&rq_info->flush_state) != + HIFC_QUEUE_FLUSH_DONE) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, + UNF_WARN, + "[warn]Port(0x%x) RootRq(0x%x) flush timeout", + v_hba->port_cfg.port_id, q_index); + } + } +} + +void hifc_wait_root_sq_empty(void *v_hba) +{ +#define HIFC_WAIT_ROOT_SQ_EMPTY_TIMEOUT_MS (50) + + unsigned int q_index = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_root_sq_info_s *sq_info = NULL; + struct hifc_hba_s *hba = NULL; + unsigned int start_wait_time = 0; + int time_out = UNF_FALSE; + + hba = (struct hifc_hba_s *)v_hba; + root_info = &hba->root_info; + + /* + * Traverse all root sq (just one) in the HBA and change the status to + * in_flush + */ + for (q_index = 0; q_index < root_info->sq_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + if (!sq_info) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[err]Port(0x%x) root sq(0x%x) info is NULL", + hba->port_cfg.port_id, q_index); + continue; + } + + start_wait_time = 0; + time_out = UNF_TRUE; + + /* Wait 1 second to check whether the Root Sq is empty */ + do { + if (hifc_root_sq_is_empty(sq_info)) { + time_out = UNF_FALSE; + break; + } + msleep(20); + start_wait_time++; + } while (start_wait_time < HIFC_WAIT_ROOT_SQ_EMPTY_TIMEOUT_MS); + + if (time_out) { + HIFC_HBA_STAT(hba, HIFC_STAT_SQ_WAIT_EMPTY); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + 
"[err]Port(0x%x) waiting for root sq(0x%x) empty timeout", + hba->port_cfg.port_id, q_index); + } + } +} + +void hifc_wait_all_queues_empty(struct hifc_hba_s *v_hba) +{ + hifc_wait_root_rq_empty(v_hba); + hifc_flush_all_scq(v_hba); +} + +void hifc_set_root_sq_flush_state(void *v_hba, int in_flush) +{ + unsigned int q_index = 0; + unsigned long flags = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_root_sq_info_s *sq_info = NULL; + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_hba; + root_info = &hba->root_info; + + /* + * for each root sq (so far, just one), + * set root sq state with been flushing or flush done + */ + for (q_index = 0; q_index < root_info->sq_num; q_index++) { + sq_info = (struct hifc_root_sq_info_s *)(root_info->sq_info) + + q_index; + if (!sq_info) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[err]Port(0x%x) root sq(0x%x) info is NULL", + hba->port_cfg.port_id, q_index); + continue; + } + + spin_lock_irqsave(&sq_info->root_sq_spin_lock, flags); + sq_info->in_flush = in_flush; + spin_unlock_irqrestore(&sq_info->root_sq_spin_lock, flags); + } +} + +void hifc_set_rport_flush_state(void *v_hba, int in_flush) +{ + unsigned int index = 0; + unsigned long flag = 0; + struct hifc_parent_queue_mgr_s *parent_queue_mgr = NULL; + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_hba; + parent_queue_mgr = hba->parent_queue_mgr; + if (!parent_queue_mgr) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) parent queue manager is empty", + hba->port_cfg.port_id); + return; + } + + /* + * for each HBA's R_Port(SQ), + * set state with been flushing or flush done + */ + for (index = 0; index < UNF_HIFC_MAXRPORT_NUM; index++) { + spin_lock_irqsave(&parent_queue_mgr->parent_queues[index].parent_sq_info.parent_sq_enqueue_lock, flag); + if (parent_queue_mgr->parent_queues[index].offload_state != + HIFC_QUEUE_STATE_FREE) { + parent_queue_mgr->parent_queues[index].parent_sq_info.port_in_flush = in_flush; + } + spin_unlock_irqrestore(&parent_queue_mgr->parent_queues[index].parent_sq_info.parent_sq_enqueue_lock, flag); + } +} + +/** + * hifc_clear_fetched_sq_wqe - Inform the chip to clear the WQE that is being + * processed by the chip. + * @v_hba : hba handle + * @Return: 0 - success, negative - failure + */ +unsigned int hifc_clear_fetched_sq_wqe(void *v_hba) +{ + unsigned int ret = UNF_RETURN_ERROR; + union hifc_cmdqe_u cmdqe; + struct hifc_hba_s *hba = NULL; + + UNF_CHECK_VALID(0x4909, UNF_TRUE, v_hba, return UNF_RETURN_ERROR); + hba = (struct hifc_hba_s *)v_hba; + + /* + * The ROOT SQ cannot control the WQE in the empty queue of the ROOT SQ. + * Therefore, the ROOT SQ does not enqueue the WQE after the hardware + * obtains the. Link down after the wait mode is used. Therefore, + * the WQE of the hardware driver needs to enter the WQE of the queue + * after the Link down of the Link down is reported. 
+ */ + hifc_wait_root_sq_empty(v_hba); + + memset(&cmdqe, 0, sizeof(union hifc_cmdqe_u)); + hifc_build_cmdqe_common(&cmdqe, HIFCOE_TASK_T_BUFFER_CLEAR, 0); + cmdqe.buffer_clear.wd1.rx_id_start = hba->exit_base; + cmdqe.buffer_clear.wd1.rx_id_end = + hba->exit_base + hba->exit_count - 1; + cmdqe.buffer_clear.scqn = hba->default_scqn; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EVENT, UNF_MAJOR, + "[info]Port(0x%x) start clear all fetched wqe in start(0x%x) - end(0x%x) scqn(0x%x) stage(0x%x)", + hba->port_cfg.port_id, + cmdqe.buffer_clear.wd1.rx_id_start, + cmdqe.buffer_clear.wd1.rx_id_end, + cmdqe.buffer_clear.scqn, + hba->q_set_stage); + + /* Send BUFFER_CLEAR command via ROOT CMDQ */ + ret = hifc_root_cmdq_enqueue(hba, &cmdqe, + sizeof(cmdqe.buffer_clear)); + + return ret; +} + +/** + * hifc_clear_pending_sq_wqe -Inform the chip to clear the Pending Sq WQE that + * is being processed by the chip. + * @v_hba: hba handle + * @Return: 0 - success, negative - failure + */ +unsigned int hifc_clear_pending_sq_wqe(void *v_hba) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int rport_index = 0; + unsigned int entry_cnt = 0; + unsigned int entry_cnt_max = 0; + unsigned int next_clr_sq = 0; + unsigned int cmdqe_len = 0; + unsigned long flag = 0; + struct hifc_parent_queue_info_s *parent_qinfo; + struct hifcoe_cmdqe_flush_sq_info_s *entry = NULL; + union hifc_cmdqe_u *cmdqe = NULL; + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_hba; + cmdqe = (union hifc_cmdqe_u *)kmalloc(HIFC_CMDQE_BUFF_LEN_MAX, + GFP_ATOMIC); + if (!cmdqe) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_EVENT, UNF_CRITICAL, + "[err]Port(0x%x) malloc flush sq information buffer cmnd failed, stage(0x%x)", + hba->port_cfg.port_id, hba->q_set_stage); + + return UNF_RETURN_ERROR; + } + + memset(cmdqe, 0, HIFC_CMDQE_BUFF_LEN_MAX); + hifc_build_cmdqe_common(cmdqe, HIFCOE_TASK_T_FLUSH_SQ, 0); + cmdqe->flush_sq.wd0.wqe_type = HIFCOE_TASK_T_FLUSH_SQ; + cmdqe->flush_sq.wd0.sq_qid = HIFC_LSW(hba->default_sq_id); + cmdqe->flush_sq.wd1.scqn = HIFC_LSW(hba->default_scqn); + cmdqe->flush_sq.wd1.port_id = hba->port_index; + + /* + * The CMDQE can contain a maximum of Clear 253 SQ information at a time + */ + entry_cnt = 0; + entry_cnt_max = (HIFC_CMDQE_BUFF_LEN_MAX - sizeof(cmdqe->flush_sq)) / + sizeof(*entry); + entry = cmdqe->flush_sq.sq_info_entry; + next_clr_sq = hba->next_clearing_sq; + + for (rport_index = next_clr_sq; rport_index < UNF_HIFC_MAXRPORT_NUM; + rport_index++) { + parent_qinfo = + &hba->parent_queue_mgr->parent_queues[rport_index]; + + spin_lock_irqsave(&parent_qinfo->parent_queue_state_lock, flag); + if (HIFC_RPORT_FLUSH_NOT_NEEDED(parent_qinfo)) { + spin_unlock_irqrestore( + &parent_qinfo->parent_queue_state_lock, flag); + next_clr_sq++; + continue; + } + entry->xid = parent_qinfo->parent_sq_info.context_id; + entry->cid = parent_qinfo->parent_sq_info.cache_id; + spin_unlock_irqrestore(&parent_qinfo->parent_queue_state_lock, + flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EVENT, UNF_MAJOR, + "[info]Port(0x%x) RPort[0x%x] flush pending SQ Entry: xid=0x%x, cid=0x%x", + hba->port_cfg.port_id, rport_index, + entry->xid, entry->cid); + + entry_cnt++; + entry++; + next_clr_sq++; + + if (entry_cnt >= entry_cnt_max) + break; + } + + if (entry_cnt == 0) { + /* If no SQ needs to be flushed, the Clear Done command is + * directly sent to the uP + */ + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EVENT, UNF_INFO, + "[info]Port(0x%x) non SQ need flush wqe, clear done directly, stage (0x%x)", + 
hba->port_cfg.port_id, hba->q_set_stage); + + /* Sends the Clear Done command to the chip */ + ret = hifc_clear_sq_wqe_done(hba); + goto free_flush_sq_cmdqe; + } + + hba->next_clearing_sq = next_clr_sq; + cmdqe->flush_sq.wd0.entry_count = entry_cnt; + + if (rport_index == UNF_HIFC_MAXRPORT_NUM) + cmdqe->flush_sq.wd1.last_wqe = 1; + else + cmdqe->flush_sq.wd1.last_wqe = 0; + + /* Clear pending Queue */ + cmdqe_len = (unsigned int)(sizeof(cmdqe->flush_sq) + + entry_cnt * sizeof(*entry)); + ret = hifc_root_cmdq_enqueue(hba, cmdqe, (unsigned short)cmdqe_len); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EVENT, UNF_MAJOR, + "[info]Port(0x%x) clear total 0x%x SQ in this CMDQE(last=%u), stage (0x%x)", + hba->port_cfg.port_id, entry_cnt, + cmdqe->flush_sq.wd1.last_wqe, hba->q_set_stage); + +free_flush_sq_cmdqe: + kfree(cmdqe); + + return ret; +} + +unsigned int hifc_wait_queue_set_flush_done(struct hifc_hba_s *v_hba) +{ + unsigned int flush_done_time = 0; + unsigned int ret = RETURN_OK; + + while ((v_hba->q_set_stage != HIFC_QUEUE_SET_STAGE_FLUSHDONE) && + (flush_done_time < HIFC_QUEUE_FLUSH_WAIT_TIMEOUT_MS)) { + msleep(HIFC_QUEUE_FLUSH_WAIT_MS); + flush_done_time += HIFC_QUEUE_FLUSH_WAIT_MS; + } + + if (v_hba->q_set_stage != HIFC_QUEUE_SET_STAGE_FLUSHDONE) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_WARN, + "[warn]Port(0x%x) queue sets flush timeout with stage(0x%x)", + v_hba->port_cfg.port_id, v_hba->q_set_stage); + + ret = UNF_RETURN_ERROR; + } + + return ret; +} + +static void hifc_disable_all_scq_schedule(struct hifc_hba_s *v_hba) +{ + struct hifc_scq_info_s *scq_info = NULL; + unsigned int index = 0; + + for (index = 0; index < HIFC_TOTAL_SCQ_NUM; index++) { + scq_info = &v_hba->scq_info[index]; + tasklet_disable(&scq_info->tasklet); + } +} + +static void hifc_disable_root_rq_schedule(struct hifc_hba_s *v_hba) +{ + unsigned int index = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_root_rq_info_s *rq_info = NULL; + + root_info = &v_hba->root_info; + + for (index = 0; index < root_info->rq_num; index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + index; + tasklet_disable(&rq_info->tasklet); + } +} + +void hifc_disable_queues_dispatch(struct hifc_hba_s *v_hba) +{ + hifc_disable_root_rq_schedule(v_hba); + hifc_disable_all_scq_schedule(v_hba); +} + +static void hifc_enable_root_rq_schedule(struct hifc_hba_s *v_hba) +{ + unsigned int index = 0; + struct hifc_root_info_s *root_info = NULL; + struct hifc_root_rq_info_s *rq_info = NULL; + + root_info = &v_hba->root_info; + + for (index = 0; index < root_info->rq_num; index++) { + rq_info = (struct hifc_root_rq_info_s *)(root_info->rq_info) + + index; + tasklet_enable(&rq_info->tasklet); + } +} + +static void hifc_enable_all_scq_schedule(struct hifc_hba_s *v_hba) +{ + struct hifc_scq_info_s *scq_info = NULL; + unsigned int index = 0; + + for (index = 0; index < HIFC_TOTAL_SCQ_NUM; index++) { + scq_info = &v_hba->scq_info[index]; + tasklet_enable(&scq_info->tasklet); + } +} + +void hifc_enable_queues_dispatch(void *v_hba) +{ + hifc_enable_root_rq_schedule((struct hifc_hba_s *)v_hba); + hifc_enable_all_scq_schedule((struct hifc_hba_s *)v_hba); +} + +void hifc_clear_els_srq(struct hifc_hba_s *v_hba) +{ + unsigned int index = 0; + unsigned long flag = 0; + struct hifc_srq_info_s *srq_info = NULL; + + srq_info = &v_hba->els_srq_info; + + spin_lock_irqsave(&srq_info->srq_spin_lock, flag); + if ((srq_info->enable == UNF_FALSE) || + (srq_info->state == HIFC_CLEAN_DOING)) { + 
spin_unlock_irqrestore(&srq_info->srq_spin_lock, flag); + + return; + } + srq_info->enable = UNF_FALSE; + srq_info->state = HIFC_CLEAN_DOING; + spin_unlock_irqrestore(&srq_info->srq_spin_lock, flag); + + hifc_send_clear_srq_cmd(v_hba, &v_hba->els_srq_info); + + /* wait for uCode to clear SRQ context, the timer is 30S */ + while ((srq_info->state != HIFC_CLEAN_DONE) && (index < 60)) { + msleep(500); + index++; + } + + if (srq_info->state != HIFC_CLEAN_DONE) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_WARN, + "[warn]HIFC Port(0x%x) clear els srq timeout", + v_hba->port_cfg.port_id); + } +} + +unsigned int hifc_wait_all_parent_queue_free(struct hifc_hba_s *v_hba) +{ + unsigned int index = 0; + unsigned int ret = UNF_RETURN_ERROR; + + do { + ret = hifc_check_all_parent_queue_free(v_hba); + if (ret == RETURN_OK) + break; + + index++; + msleep(20); + } while (index < 1500); + + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[warn]Port(0x%x) wait all parent queue state free timeout", + v_hba->port_cfg.port_id); + } + + return ret; +} + +void hifc_queue_pre_process(void *v_hba, int v_clean) +{ +#define HIFC_WAIT_LINKDOWN_EVENT_MS 500 + + /* From port reset & port remove */ + struct hifc_hba_s *hba = (struct hifc_hba_s *)v_hba; + + /* 1. Wait for 2s and wait for QUEUE to be FLUSH Done. */ + if (hifc_wait_queue_set_flush_done(hba) != RETURN_OK) { + /* + * During the process of removing the card, if the port is + * disabled and the flush done is not available, the chip is + * powered off or the pcie link is disconnected. In this case, + * you can proceed with the next step. + */ + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]HIFC Port(0x%x) clean queue sets timeout", + hba->port_cfg.port_id); + } + + /* + * 2. Port remove: + * 2.1 free parent queue + * 2.2 clear & destroy ELS/SIRT SRQ + */ + if (v_clean == UNF_TRUE) { + if (hifc_wait_all_parent_queue_free(hba) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[warn]HIFC Port(0x%x) free all parent queue timeout", + hba->port_cfg.port_id); + } + + /* clear & than destroy ELS SRQ */ + hifc_clear_els_srq(hba); + } + + msleep(HIFC_WAIT_LINKDOWN_EVENT_MS); + + /* + * 3. The internal resources of the port chip are flush done. However, + * there may be residual scqe or rq in the queue. The scheduling is + * forcibly refreshed once. + */ + hifc_wait_all_queues_empty(hba); + + /* + * 4. 
Disable tasklet scheduling for upstream queues on the software + * layer + */ + hifc_disable_queues_dispatch(hba); +} + +unsigned int hifc_push_delay_sqe( + void *v_hba, + struct hifc_parent_queue_info_s *v_offload_parent_queue, + struct hifc_root_sqe_s *v_sqe, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned long flag = 0; + + spin_lock_irqsave(&v_offload_parent_queue->parent_queue_state_lock, + flag); + + if ((v_offload_parent_queue->offload_state != + HIFC_QUEUE_STATE_INITIALIZED) && + (v_offload_parent_queue->offload_state != HIFC_QUEUE_STATE_FREE)) { + memcpy(&v_offload_parent_queue->parent_sq_info.delay_sqe.sqe, + v_sqe, sizeof(struct hifc_root_sqe_s)); + v_offload_parent_queue->parent_sq_info.delay_sqe.start_jiff = + jiffies; + v_offload_parent_queue->parent_sq_info.delay_sqe.time_out = + v_pkg->private[PKG_PRIVATE_XCHG_TIMEER]; + v_offload_parent_queue->parent_sq_info.delay_sqe.valid = + UNF_TRUE; + v_offload_parent_queue->parent_sq_info.delay_sqe.rport_index = + v_pkg->private[PKG_PRIVATE_XCHG_RPORT_INDEX]; + v_offload_parent_queue->parent_sq_info.delay_sqe.sid = + v_pkg->frame_head.csctl_sid & UNF_NPORTID_MASK; + v_offload_parent_queue->parent_sq_info.delay_sqe.did = + v_pkg->frame_head.rctl_did & UNF_NPORTID_MASK; + + spin_unlock_irqrestore( + &v_offload_parent_queue->parent_queue_state_lock, flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) RPort(0x%x) delay send ELS, OXID(0x%x), RXID(0x%x)", + ((struct hifc_hba_s *)v_hba)->port_cfg.port_id, + v_pkg->private[PKG_PRIVATE_XCHG_RPORT_INDEX], + UNF_GET_OXID(v_pkg), UNF_GET_RXID(v_pkg)); + + return RETURN_OK; + } + + spin_unlock_irqrestore(&v_offload_parent_queue->parent_queue_state_lock, + flag); + + return UNF_RETURN_ERROR; +} + +void hifc_pop_delay_sqe(struct hifc_hba_s *v_hba, + struct hifc_delay_sqe_ctrl_info_s *v_sqe_info) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned long flag = 0; + unsigned int delay_rport_index = INVALID_VALUE32; + struct hifc_parent_queue_info_s *parent_queue = NULL; + enum hifc_parent_queue_state_e offload_state = + HIFC_QUEUE_STATE_DESTROYING; + struct hifc_destroy_ctrl_info_s destroy_sqe_info = { 0 }; + + /* + * According to the sequence, the rport index id is reported and then + * the sqe of the new link setup request is delivered. + */ + if (v_sqe_info->valid != UNF_TRUE) + return; + if (jiffies_to_msecs(jiffies - v_sqe_info->start_jiff) >= + v_sqe_info->time_out) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) pop delay root sqe failed, sqe start time 0x%llx, timeout value 0x%x", + v_hba->port_cfg.port_id, + v_sqe_info->start_jiff, + v_sqe_info->time_out); + } + + delay_rport_index = v_sqe_info->rport_index; + if (delay_rport_index >= UNF_HIFC_MAXRPORT_NUM) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) pop delay root sqe failed, rport index(0x%x) is invalid", + v_hba->port_cfg.port_id, + delay_rport_index); + + return; + } + + parent_queue = + &v_hba->parent_queue_mgr->parent_queues[delay_rport_index]; + + /* Before the root sq is delivered, check the status again to + * ensure that the initialization status is not uninstalled. Other + * states are not processed and are discarded directly. 
+ */ + spin_lock_irqsave(&parent_queue->parent_queue_state_lock, flag); + offload_state = parent_queue->offload_state; + + /* Before re-enqueuing the rootsq, check whether the offload status and + * connection information is consistent to prevent the old request from + * being sent after the connection status is changed. + */ + if ((offload_state == HIFC_QUEUE_STATE_INITIALIZED) && + (parent_queue->parent_sq_info.local_port_id == v_sqe_info->sid) && + (parent_queue->parent_sq_info.remote_port_id == v_sqe_info->did) && + HIFC_CHECK_XID_MATCHED( + parent_queue->parent_sq_info.context_id, + v_sqe_info->sqe.task_section.fc_dw4.parent_xid)) { + parent_queue->offload_state = HIFC_QUEUE_STATE_OFFLOADING; + spin_unlock_irqrestore(&parent_queue->parent_queue_state_lock, + flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) pop up delay sqe to root sq, sqe start time 0x%llx, timeout value 0x%x, rport index 0x%x, offload state 0x%x", + v_hba->port_cfg.port_id, + v_sqe_info->start_jiff, + v_sqe_info->time_out, + delay_rport_index, + offload_state); + + ret = hifc_root_sq_enqueue(v_hba, &v_sqe_info->sqe); + if (ret != RETURN_OK) { + spin_lock_irqsave( + &parent_queue->parent_queue_state_lock, flag); + + if (parent_queue->offload_state == + HIFC_QUEUE_STATE_OFFLOADING) + parent_queue->offload_state = offload_state; + + if (parent_queue->parent_sq_info.destroy_sqe.valid == + UNF_TRUE) { + memcpy( + &destroy_sqe_info, + &parent_queue->parent_sq_info.destroy_sqe, + sizeof(struct hifc_destroy_ctrl_info_s)); + + parent_queue->parent_sq_info.destroy_sqe.valid = + UNF_FALSE; + } + + spin_unlock_irqrestore( + &parent_queue->parent_queue_state_lock, flag); + + hifc_pop_destroy_parent_queue_sqe((void *)v_hba, + &destroy_sqe_info); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, + UNF_ERR, + "[err]Port(0x%x) pop up delay sqe to root sq fail, recover offload state 0x%x", + v_hba->port_cfg.port_id, + parent_queue->offload_state); + } + } else { + ret = UNF_RETURN_ERROR; + spin_unlock_irqrestore(&parent_queue->parent_queue_state_lock, + flag); + } + + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port 0x%x pop delay root sqe failed, sqe start time 0x%llx, timeout value 0x%x, rport index 0x%x, offload state 0x%x", + v_hba->port_cfg.port_id, + v_sqe_info->start_jiff, + v_sqe_info->time_out, + delay_rport_index, + offload_state); + } +} + +void hifc_push_destroy_parent_queue_sqe( + void *v_hba, + struct hifc_parent_queue_info_s *v_parent_qinfo, + struct unf_rport_info_s *v_rport_info) +{ + v_parent_qinfo->parent_sq_info.destroy_sqe.valid = UNF_TRUE; + v_parent_qinfo->parent_sq_info.destroy_sqe.rport_index = + v_rport_info->rport_index; + v_parent_qinfo->parent_sq_info.destroy_sqe.time_out = + HIFC_SQ_DEL_STAGE_TIMEOUT_MS; + v_parent_qinfo->parent_sq_info.destroy_sqe.start_jiff = jiffies; + + v_parent_qinfo->parent_sq_info.destroy_sqe.rport_info.nport_id = + v_rport_info->nport_id; + v_parent_qinfo->parent_sq_info.destroy_sqe.rport_info.rport_index = + v_rport_info->rport_index; + v_parent_qinfo->parent_sq_info.destroy_sqe.rport_info.port_name = + v_rport_info->port_name; +} + +void hifc_pop_destroy_parent_queue_sqe( + void *v_hba, + struct hifc_destroy_ctrl_info_s *v_destroy_sqe_info) +{ + struct hifc_hba_s *hba = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned long flag = 0; + unsigned int delay_rport_index = INVALID_VALUE32; + struct hifc_parent_queue_info_s *parent_queue = NULL; + enum 
hifc_parent_queue_state_e offload_state = + HIFC_QUEUE_STATE_DESTROYING; + + hba = (struct hifc_hba_s *)v_hba; + + if (v_destroy_sqe_info->valid != UNF_TRUE) + return; + + if (jiffies_to_msecs(jiffies - v_destroy_sqe_info->start_jiff) < + v_destroy_sqe_info->time_out) { + delay_rport_index = v_destroy_sqe_info->rport_index; + parent_queue = + &hba->parent_queue_mgr->parent_queues[delay_rport_index]; + + /* Before delivery, check the status again to ensure that the + * initialization status is not uninstalled. Other states are + * not processed and are discarded directly. + */ + spin_lock_irqsave(&parent_queue->parent_queue_state_lock, flag); + + offload_state = parent_queue->offload_state; + if ((offload_state == HIFC_QUEUE_STATE_OFFLOADED) || + (offload_state == HIFC_QUEUE_STATE_INITIALIZED)) { + spin_unlock_irqrestore( + &parent_queue->parent_queue_state_lock, flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port 0x%x pop up delay destroy parent sq, sqe start time 0x%llx, timeout value 0x%x, rport index 0x%x, offload state 0x%x", + hba->port_cfg.port_id, + v_destroy_sqe_info->start_jiff, + v_destroy_sqe_info->time_out, + delay_rport_index, + offload_state); + ret = hifc_free_parent_resource( + hba, + &v_destroy_sqe_info->rport_info); + } else { + ret = UNF_RETURN_ERROR; + spin_unlock_irqrestore( + &parent_queue->parent_queue_state_lock, flag); + } + } + + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port 0x%x pop delay destroy parent sq failed, sqe start time 0x%llx, timeout value 0x%x, rport index 0x%x, rport nport id 0x%x,offload state 0x%x", + hba->port_cfg.port_id, + v_destroy_sqe_info->start_jiff, + v_destroy_sqe_info->time_out, + delay_rport_index, + v_destroy_sqe_info->rport_info.nport_id, + offload_state); + } +} + +void hifc_free_parent_queue_info( + void *v_hba, + struct hifc_parent_queue_info_s *v_parent_queue_info) +{ + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int rport_index = INVALID_VALUE32; + struct hifc_hba_s *hba = NULL; + struct hifc_delay_sqe_ctrl_info_s sqe_info; + + memset(&sqe_info, 0, sizeof(struct hifc_delay_sqe_ctrl_info_s)); + hba = (struct hifc_hba_s *)v_hba; + + spin_lock_irqsave(&v_parent_queue_info->parent_queue_state_lock, flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) begin to free parent sq, rport_index(0x%x)", + hba->port_cfg.port_id, + v_parent_queue_info->parent_sq_info.rport_index); + + if (v_parent_queue_info->offload_state == HIFC_QUEUE_STATE_FREE) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[info]Port(0x%x) duplicate free parent sq, rport_index(0x%x)", + hba->port_cfg.port_id, + v_parent_queue_info->parent_sq_info.rport_index); + + spin_unlock_irqrestore( + &v_parent_queue_info->parent_queue_state_lock, + flag); + return; + } + + if (v_parent_queue_info->parent_sq_info.delay_sqe.valid == UNF_TRUE) { + memcpy(&sqe_info, + &v_parent_queue_info->parent_sq_info.delay_sqe, + sizeof(struct hifc_delay_sqe_ctrl_info_s)); + } + + rport_index = v_parent_queue_info->parent_sq_info.rport_index; + + /* The Parent Contexe and SQ information is released. 
After + * initialization, the Parent Contexe and SQ information is associated + * with the sq in the queue of the parent + */ + hifc_free_parent_sq(hba, v_parent_queue_info); + + /* The initialization of all queue id is invalid */ + v_parent_queue_info->parent_cmd_scq_info.cqm_queue_id = INVALID_VALUE32; + v_parent_queue_info->parent_sts_scq_info.cqm_queue_id = INVALID_VALUE32; + v_parent_queue_info->parent_els_srq_info.cqm_queue_id = INVALID_VALUE32; + v_parent_queue_info->offload_state = HIFC_QUEUE_STATE_FREE; + + spin_unlock_irqrestore(&v_parent_queue_info->parent_queue_state_lock, + flag); + + UNF_LOWLEVEL_PORT_EVENT(ret, hba->lport, UNF_PORT_RELEASE_RPORT_INDEX, + (void *)&rport_index); + hifc_pop_delay_sqe(hba, &sqe_info); + + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[warn]Port(0x%x) free parent sq with rport_index(0x%x) failed", + hba->port_cfg.port_id, rport_index); + } +} + +void hifc_build_session_rst_wqe(void *v_hba, + struct hifc_parent_sq_info_s *v_sq, + struct hifcoe_sqe_s *v_sqe, + enum hifc_session_reset_mode_e v_mode, + unsigned int scqn) +{ + struct hifc_hba_s *hba = NULL; + + hba = (struct hifc_hba_s *)v_hba; + + /* + * The reset session command does not occupy xid. Therefore, + * 0xffff can be used to align with the microcode. + */ + v_sqe->ts_sl.task_type = HIFC_SQE_SESS_RST; + v_sqe->ts_sl.local_xid = 0xffff; + v_sqe->ts_sl.wd0.conn_id = (unsigned short)(v_sq->rport_index); + v_sqe->ts_sl.wd0.remote_xid = 0xffff; + + v_sqe->ts_sl.cont.reset_session.wd0.reset_exch_start = hba->exit_base; + v_sqe->ts_sl.cont.reset_session.wd0.reset_exch_end = hba->exit_base + + (hba->exit_count - 1); + v_sqe->ts_sl.cont.reset_session.wd1.reset_did = v_sq->remote_port_id; + v_sqe->ts_sl.cont.reset_session.wd1.mode = v_mode; + v_sqe->ts_sl.cont.reset_session.wd2.reset_sid = v_sq->local_port_id; + v_sqe->ts_sl.cont.reset_session.wd3.scqn = scqn; + + hifc_build_common_wqe_ctrls(&v_sqe->ctrl_sl, + sizeof(struct hifcoe_sqe_ts_s) / + HIFC_WQE_SECTION_CHUNK_SIZE); +} + +unsigned int hifc_send_session_rst_cmd( + void *v_hba, + struct hifc_parent_queue_info_s *v_parent_queue_info, + enum hifc_session_reset_mode_e v_mode) +{ + struct hifc_parent_sq_info_s *sq = NULL; + struct hifcoe_sqe_s rst_sess_sqe; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int sts_scqn = 0; + + memset(&rst_sess_sqe, 0, sizeof(struct hifcoe_sqe_s)); + sq = &v_parent_queue_info->parent_sq_info; + sts_scqn = ((struct hifc_hba_s *)v_hba)->default_scqn; + hifc_build_session_rst_wqe(v_hba, sq, &rst_sess_sqe, v_mode, sts_scqn); + + /* Run the sq command to issue the reset session command to the + * microcode, that is, the last command. + */ + ret = hifc_parent_sq_enqueue(sq, &rst_sess_sqe); + + return ret; +} + +void hifc_rcvd_els_from_srq_time_out(struct work_struct *work) +{ + struct hifc_hba_s *hba = NULL; + + hba = container_of(work, struct hifc_hba_s, delay_info.del_work.work); + + /* + * If the frame is not processed, the frame is pushed to the CM layer: + * The frame may have been processed when the root rq receives data. 
+	 */
+	if (hba->delay_info.srq_delay_flag) {
+		hifc_rcv_els_cmnd(
+			hba, &hba->delay_info.pkg,
+			hba->delay_info.pkg.unf_cmnd_pload_bl.buffer_ptr,
+			0, UNF_FALSE);
+		hba->delay_info.srq_delay_flag = 0;
+
+		HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
+			   "[info]Port(0x%x) srq delay work timeout, send saved PLOGI to CM",
+			   hba->port_cfg.port_id);
+	}
+}
+
+unsigned int hifc_rport_session_rst(void *v_hba,
+				    struct unf_rport_info_s *v_rport_info)
+{
+	/* NOT USED NOW */
+	struct hifc_hba_s *hba = NULL;
+	struct hifc_parent_queue_info_s *v_parent_queue_info = NULL;
+	unsigned long flag = 0;
+	unsigned int ret = UNF_RETURN_ERROR;
+
+	HIFC_CHECK(INVALID_VALUE32, v_hba, return UNF_RETURN_ERROR);
+	HIFC_CHECK(INVALID_VALUE32, v_rport_info, return UNF_RETURN_ERROR);
+
+	hba = (struct hifc_hba_s *)v_hba;
+	if (!hba->parent_queue_mgr) {
+		HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
+			   "[err]Port 0x%x cannot find parent queue pool",
+			   hba->port_cfg.port_id);
+
+		return UNF_RETURN_ERROR;
+	}
+
+	if (v_rport_info->rport_index >= UNF_HIFC_MAXRPORT_NUM) {
+		HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
+			   "[err]Port 0x%x free parent resource failed, invalid rport index %u,Rport NPortId 0x%x",
+			   hba->port_cfg.port_id,
+			   v_rport_info->rport_index,
+			   v_rport_info->nport_id);
+
+		return UNF_RETURN_ERROR;
+	}
+
+	v_parent_queue_info =
+		&hba->parent_queue_mgr->parent_queues[v_rport_info->rport_index];
+
+	spin_lock_irqsave(&v_parent_queue_info->parent_queue_state_lock, flag);
+
+	if (v_parent_queue_info->offload_state == HIFC_QUEUE_STATE_OFFLOADED) {
+		HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
+			   "[info]Port 0x%x parent sq reset session, rport index 0x%x:0x%x,local nportid 0x%x,remote nportid 0x%x:0x%x,ctx id 0x%x, cid 0x%x",
+			   hba->port_cfg.port_id,
+			   v_rport_info->rport_index,
+			   v_parent_queue_info->parent_sq_info.rport_index,
+			   v_parent_queue_info->parent_sq_info.local_port_id,
+			   v_rport_info->nport_id,
+			   v_parent_queue_info->parent_sq_info.remote_port_id,
+			   v_parent_queue_info->parent_sq_info.context_id,
+			   v_parent_queue_info->parent_sq_info.cache_id);
+
+		/* this scenario does not exist */
+		(void)queue_delayed_work(
+			hba->work_queue,
+			&v_parent_queue_info->parent_sq_info.del_work,
+			(unsigned long)
+			msecs_to_jiffies((unsigned int)
+			HIFC_SQ_DEL_STAGE_TIMEOUT_MS));
+
+		spin_unlock_irqrestore(
+			&v_parent_queue_info->parent_queue_state_lock, flag);
+
+		/*
+		 * The current session reset is in clear I/O mode, and the
+		 * connection resources are not deleted
+		 */
+		ret = hifc_send_session_rst_cmd(hba,
+						v_parent_queue_info,
+						HIFC_SESS_RST_DELETE_IO_ONLY);
+	} else {
+		HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
+			   "[info]Port 0x%x parent sq is not offloaded, no need to reset session, rport index 0x%x:0x%x,local nportid 0x%x,remote nportid 0x%x:0x%x",
+			   hba->port_cfg.port_id,
+			   v_rport_info->rport_index,
+			   v_parent_queue_info->parent_sq_info.rport_index,
+			   v_parent_queue_info->parent_sq_info.local_port_id,
+			   v_rport_info->nport_id,
+			   v_parent_queue_info->parent_sq_info.remote_port_id);
+
+		spin_unlock_irqrestore(
+			&v_parent_queue_info->parent_queue_state_lock, flag);
+
+		ret = RETURN_OK;
+	}
+
+	return ret;
+}
+
+/**
+ * hifc_flush_ini_resp_queue - Flush only the response entries that are
+ * already queued; subsequent completions are not waited for. This is the
+ * main difference between this function and HIFC_FlushScq.
+ * @v_hba: hba handle
+ * @Return: RETURN_OK on success, UNF_RETURN_ERROR on failure
+ */
+unsigned int hifc_flush_ini_resp_queue(void *v_hba)
+{
+	struct hifc_hba_s *hba = NULL;
+
+	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba,
+			return UNF_RETURN_ERROR);
+	hba = (struct hifc_hba_s *)v_hba;
+
+	/*
+	 * The original HIFC_FlushScq assumes that the port has been disabled,
+	 * so that after it runs the SCQ is empty. Here the port is not
+	 * disabled, so flushing only indicates that the currently queued
+	 * batch of responses has been processed.
+	 */
+	hifc_flush_sts_scq(hba);
+
+	return RETURN_OK;
+}
+
+/*
+ * Function Name       : hifc_handle_aeq_queue_error
+ * Function Description: Process the queue error event sent by the chip
+ *                       through AEQ.
+ * Input Parameters    : *v_hba,
+ *                     : *v_aeq_msg
+ * Output Parameters   : N/A
+ * Return Type         : void
+ */
+static void hifc_handle_aeq_queue_error(struct hifc_hba_s *v_hba,
+					struct hifcoe_aqe_data_s *v_aeq_msg)
+{
+	unsigned int sts_scqn_local = 0;
+	unsigned int full_ci = INVALID_VALUE32;
+	unsigned int full_ci_owner = INVALID_VALUE32;
+	struct hifc_scq_info_s *scq_info = NULL;
+	struct hifcoe_aqe_data_s *aeq_msg = NULL;
+
+	aeq_msg = v_aeq_msg;
+
+	sts_scqn_local = HIFC_RPORTID_TO_STS_SCQN(aeq_msg->wd0.conn_id);
+	scq_info = &v_hba->scq_info[sts_scqn_local];
+	full_ci = scq_info->ci;
+	full_ci_owner = scq_info->ci_owner;
+
+	/*
+	 * Currently the flush is forced on the StsScq; the AEQE is returned
+	 * no matter whether the SCQ has been processed.
+	 */
+	tasklet_schedule(&scq_info->tasklet);
+
+	HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
+		   "[info]Port(0x%x) RPort(0x%x) LocalScqn(0x%x) CqmScqn(0x%x) is full, force flush CI from (%d|0x%x) to (%d|0x%x)",
+		   v_hba->port_cfg.port_id, aeq_msg->wd0.conn_id,
+		   sts_scqn_local, scq_info->scqn,
+		   full_ci_owner, full_ci, scq_info->ci_owner, scq_info->ci);
+}
+
+void hifc_process_aeqe(void *v_srv_handle,
+		       unsigned char event_type,
+		       u64 event_val)
+{
+	unsigned int ret = RETURN_OK;
+	struct hifc_hba_s *hba = (struct hifc_hba_s *)v_srv_handle;
+	struct hifcoe_aqe_data_s aeq_msg;
+	unsigned long long aeq_info = 0;
+	unsigned char event_code = INVALID_VALUE8;
+
+	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, hba, return);
+
+	aeq_info = cpu_to_be64(event_val);
+	memcpy(&aeq_msg, (struct hifcoe_aqe_data_s *)&aeq_info,
+	       sizeof(struct hifcoe_aqe_data_s));
+	hifc_big_to_cpu32(&aeq_msg, sizeof(struct hifcoe_aqe_data_s));
+	event_code = (unsigned char)aeq_msg.wd0.evt_code;
+
+	switch (event_type) {
+	case FC_AEQ_EVENT_QUEUE_ERROR:
+		hifc_handle_aeq_queue_error(hba, &aeq_msg);
+		break;
+
+	case FC_AEQ_EVENT_WQE_FATAL_ERROR:
+		UNF_LOWLEVEL_PORT_EVENT(ret,
+					hba->lport,
+					UNF_PORT_ABNORMAL_RESET,
+					NULL);
+		break;
+
+	case FC_AEQ_EVENT_CTX_FATAL_ERROR:
+		break;
+
+	case FC_AEQ_EVENT_OFFLOAD_ERROR:
+		ret = hifc_handle_aeq_offload_err(hba, &aeq_msg);
+		break;
+
+	default:
+		HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
+			   "[warn]Port(0x%x) receive an unsupported AEQ EventType(0x%x) EventVal(0x%llx).",
+			   hba->port_cfg.port_id, event_type,
+			   (unsigned long long)event_val);
+		return;
+	}
+
+	if (event_code < FC_AEQ_EVT_ERR_CODE_BUTT)
+		HIFC_AEQ_ERR_TYPE_STAT(hba, aeq_msg.wd0.evt_code);
+
+	HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT,
+		   "[info]Port(0x%x) receive AEQ EventType(0x%x) EventVal(0x%llx) EvtCode(0x%x) Conn_id(0x%x) Xid(0x%x) %s",
+		   hba->port_cfg.port_id, event_type,
+		   (unsigned long long)event_val, event_code,
+		   aeq_msg.wd0.conn_id,
aeq_msg.wd1.xid, + (ret == UNF_RETURN_ERROR) ? "ERROR" : "OK"); +} + diff --git a/drivers/scsi/huawei/hifc/hifc_queue.h b/drivers/scsi/huawei/hifc/hifc_queue.h new file mode 100644 index 000000000000..cc3e753be7a6 --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_queue.h @@ -0,0 +1,1363 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef __HIFC_QUEUE_H__ +#define __HIFC_QUEUE_H__ + +#include "hifc_wqe.h" +#include "hifc_hw.h" +#include "hifc_hwif.h" +#include "hifc_cqm_main.h" + +#define WQE_MARKER_0 0x0 +#define WQE_MARKER_6B 0x6b + +#define HIFC_SQE_SIZE 128 +#define HIFC_MIN_WP_NUM 2 + +/* Counter */ +#define HIFC_STAT_SESSION_IO + +/*************** PARENT SQ&Context defines *******************************/ +#define HIFC_MAX_MSN (65535) +#define HIFC_MSN_MASK (0xffff000000000000LL) +#define HIFC_SQE_TS_SIZE (72) +#define HIFC_SQE_FIRST_OBIT_DW_POS (0) +#define HIFC_SQE_SECOND_OBIT_DW_POS (30) +#define HIFC_SQE_OBIT_SET_MASK_BE (0x80) +#define HIFC_SQE_OBIT_CLEAR_MASK_BE (0xffffff7f) +#define HIFC_MAX_SQ_TASK_TYPE_CNT (128) + +/* + * Note: if the location of flush done bit changes, the definition must be + * modifyed again + */ +#define HIFC_CTXT_FLUSH_DONE_DW_POS (58) +#define HIFC_CTXT_FLUSH_DONE_MASK_BE (0x4000) + +#define HIFC_GET_SQ_HEAD(v_sq) \ + list_entry((&(v_sq)->list_linked_list_sq)->next,\ + struct hifc_sq_wqe_page_s, entry_wpg) +#define HIFC_GET_SQ_TAIL(v_sq) \ + list_entry((&(v_sq)->list_linked_list_sq)->prev, \ + struct hifc_sq_wqe_page_s, entry_wpg) +#ifdef HIFC_STAT_SESSION_IO +#define HIFC_SQ_IO_STAT(v_sq, io_type) \ + (atomic_inc(&(v_sq)->io_stat[io_type])) +#define HIFC_SQ_IO_STAT_READ(v_sq, io_type) \ + (atomic_read(&(v_sq)->io_stat[io_type])) +#endif +#define HIFC_GET_QUEUE_CMSN(v_sq)\ + ((unsigned int)(be64_to_cpu(((((v_sq)->queue_header)->ci_record) \ + & HIFC_MSN_MASK)))) +#define HIFC_GET_WP_END_CMSN(head_start_cmsn, wqe_num_per_buf) \ + (unsigned short)(((unsigned int)(head_start_cmsn) +\ + (unsigned int)(wqe_num_per_buf) - 1) % (HIFC_MAX_MSN + 1)) +#define HIFC_MSN_INC(msn) (((HIFC_MAX_MSN) == (msn)) ? 0 : ((msn) + 1)) +#define HIFC_MSN_DEC(msn) ((0 == (msn)) ? 
(HIFC_MAX_MSN) : ((msn) - 1)) +#define HIFC_QUEUE_MSN_OFFSET(start_cmsn, end_cmsn) \ + (unsigned int)((((unsigned int)(end_cmsn) + (HIFC_MAX_MSN)) - \ + (unsigned int)(start_cmsn)) % (HIFC_MAX_MSN + 1)) + +/******************* ROOT SQ&RQ defines ***********************************/ +#define HIFC_ROOT_Q_CTX_SIZE (48) +#define HIFC_ROOT_Q_CTX_CI_WQE_HI_SHIFT (44) +#define HIFC_ROOT_Q_CTX_CI_WQE_LOW_SHIFT (12) +#define HIFC_ROOT_Q_CTX_CLA_HI_SHIFT (41) +#define HIFC_ROOT_Q_CTX_CLA_LOW_SHIFT (9) +#define HIFC_ROOT_TSO_LRO_SPACE (0) +#define HIFC_ROOT_CTX_WQE_PREFETCH_MAX (3) +#define HIFC_ROOT_CTX_WQE_PREFETCH_MIN (1) +#define HIFC_ROOT_CTX_WQE_PRERETCH_THRESHOLD (2) +#define HIFC_CI_WQE_PAGE_HIGH_ADDR(x) \ + (unsigned int)(((x) >> HIFC_ROOT_Q_CTX_CI_WQE_HI_SHIFT) & 0xffffffff) +#define HIFC_CI_WQE_PAGE_LOW_ADDR(x) \ + (unsigned int)(((x) >> HIFC_ROOT_Q_CTX_CI_WQE_LOW_SHIFT) & 0xffffffff) +#define HIFC_CLA_HIGH_ADDR(x)\ + (unsigned int)(((x) >> HIFC_ROOT_Q_CTX_CLA_HI_SHIFT) & 0xffffffff) +#define HIFC_CLA_LOW_ADDR(x) \ + (unsigned int)(((x) >> HIFC_ROOT_Q_CTX_CLA_LOW_SHIFT) & 0xffffffff) + +/*********************** ROOT SQ defines ***********************************/ +#define HIFC_ROOT_SQ_NUM (1) +#define HIFC_ROOT_SQ_DEPTH (2048) +#define HIFC_ROOT_SQ_WQEBB (64) +#define HIFC_ROOT_SQ_CI_TABLE_STEP_BYTE (4) +#define HIFC_ROOT_SQ_LOOP_OWNER (1) +#define HIFC_ROOT_SQ_CI_ATTRIBUTE_ADDRESS_SHIFT (2) +#define HIFC_DOORBELL_SQ_TYPE (1) +#define HIFC_DOORBELL_SQ_PI_HIGH_BITS_SHIFT (8) +#define HIFC_DOORBELL_SQ_PI_LOW_BITS_MASK (0xFF) +#define HIFC_INT_NUM_PER_QUEUE (1) +#define HIFC_INT_ENABLE (1) +#define HIFC_ROOT_CFG_SQ_NUM_MAX (42) +#define HIFC_CMDQ_QUEUE_TYPE_SQ (0) +#define HIFC_GET_ROOT_SQ_CI_ADDR(addr, index) \ + ((addr) + (unsigned int)((index) * HIFC_ROOT_SQ_CI_TABLE_STEP_BYTE)) +#define HIFC_ROOT_SQ_CTX_OFFSET(q_num, q_id) \ + ((HIFC_ROOT_TSO_LRO_SPACE * 2 * (q_num) +\ + HIFC_ROOT_Q_CTX_SIZE * (q_id)) / 16) + +/********************** ROOT RQ defines ***********************************/ +#define HIFC_ROOT_RQ_NUM (1) +#define HIFC_ROOT_RQ_DEPTH (1024) +#define HIFC_ROOT_RQ_WQEBB (32) +#define HIFC_ROOT_RQ_PI_TABLE_STEP_BYTE (4) +#define HIFC_ROOT_RQ_LOOP_OWNER (1) +#define HIFC_ROOT_RQ_RECV_BUFF_SIZE (1024) +#define HIFC_ROOT_Q_INT_ID_MAX (1024) /* 10bit */ +#define HIFC_ROOT_CFG_RQ_NUM_MAX (42) +#define HIFC_CMDQ_QUEUE_TYPE_RQ (1) +#define HIFC_RQE_MAX_PROCESS_NUM_PER_INTR (128) +#define HIFC_ROOT_RQ_CTX_OFFSET(q_num, q_id)\ + (((HIFC_ROOT_TSO_LRO_SPACE * 2 + HIFC_ROOT_Q_CTX_SIZE) * (q_num) +\ + HIFC_ROOT_Q_CTX_SIZE * (q_id)) / 16) + +/************************** SCQ defines ***********************************/ +#define HIFC_SCQ_INT_ID_MAX (2048) /* 11BIT */ +#define HIFC_SCQE_SIZE (64) +#define HIFC_CQE_GPA_SHIFT (4) +#define HIFC_NEXT_CQE_GPA_SHIFT (12) +/* 1-Update Ci by Tile, 0-Update Ci by Hardware */ +#define HIFC_PMSN_CI_TYPE_FROM_HOST (0) +#define HIFC_PMSN_CI_TYPE_FROM_UCODE (1) +#define HIFC_ARMQ_IDLE (0) +#define HIFC_CQ_INT_MODE (2) +#define HIFC_CQ_HEADER_OWNER_SHIFT (15) + +/* + * SCQC_CQ_DEPTH: 0-256, 1-512, 2-1k, 3-2k, 4-4k, 5-8k, 6-16k, 7-32k. 
+ * include LinkWqe + */ +#define HIFC_CMD_SCQ_DEPTH (4096) +#define HIFC_STS_SCQ_DEPTH (8192) + +#define HIFC_CMD_SCQC_CQ_DEPTH (hifc_log2n(HIFC_CMD_SCQ_DEPTH >> 8)) +#define HIFC_STS_SCQC_CQ_DEPTH (hifc_log2n(HIFC_STS_SCQ_DEPTH >> 8)) +#define HIFC_STS_SCQ_CI_TYPE HIFC_PMSN_CI_TYPE_FROM_HOST + +#define HIFC_CMD_SCQ_CI_TYPE HIFC_PMSN_CI_TYPE_FROM_UCODE +#define HIFC_SCQ_INTR_LOW_LATENCY_MODE 0 +#define HIFC_SCQ_INTR_POLLING_MODE 1 + +#define HIFC_CQE_MAX_PROCESS_NUM_PER_INTR (128) +#define HIFC_SESSION_SCQ_NUM (16) + +/* + * SCQ[0, 2, 4 ...]CMD SCQ,SCQ[1, 3, 5 ...]STS SCQ,SCQ[HIFC_TOTAL_SCQ_NUM-1] + * Defaul SCQ + */ +#define HIFC_CMD_SCQN_START (0) +#define HIFC_STS_SCQN_START (1) +#define HIFC_SCQS_PER_SESSION (2) + +#define HIFC_TOTAL_SCQ_NUM (HIFC_SESSION_SCQ_NUM + 1) + +#define HIFC_SCQ_IS_STS(scq_index) \ + (((scq_index) % HIFC_SCQS_PER_SESSION) || \ + ((scq_index) == HIFC_SESSION_SCQ_NUM)) +#define HIFC_SCQ_IS_CMD(scq_index)\ + (!HIFC_SCQ_IS_STS(scq_index)) +#define HIFC_RPORTID_TO_CMD_SCQN(rport_index) \ + (((rport_index) * HIFC_SCQS_PER_SESSION) % HIFC_SESSION_SCQ_NUM) +#define HIFC_RPORTID_TO_STS_SCQN(rport_index) \ + ((((rport_index) * HIFC_SCQS_PER_SESSION) + 1) % HIFC_SESSION_SCQ_NUM) + +/************************** SRQ defines ***********************************/ +#define HIFC_SRQE_SIZE (32) +#define HIFC_SRQ_INIT_LOOP_O (1) +#define HIFC_QUEUE_RING (1) +#define HIFC_SRQ_ELS_DATA_NUM (1) +#define HIFC_SRQ_ELS_SGE_LEN (256) +#define HIFC_SRQ_ELS_DATA_DEPTH (4096) + +#define HIFC_IRQ_NAME_MAX (30) + +/* Support 2048 sessions(xid) */ +#define HIFC_CQM_XID_MASK (0x7ff) + +#define HIFC_QUEUE_FLUSH_DOING (0) +#define HIFC_QUEUE_FLUSH_DONE (1) +#define HIFC_QUEUE_FLUSH_WAIT_TIMEOUT_MS (2000) +#define HIFC_QUEUE_FLUSH_WAIT_MS (2) + +/************************* RPort defines ***********************************/ +#define HIFC_EXIT_STRIDE (4096) +#define UNF_HIFC_MAXRPORT_NUM (2048) +#define HIFC_RPORT_OFFLOADED(prnt_qinfo) \ + ((prnt_qinfo)->offload_state == HIFC_QUEUE_STATE_OFFLOADED) +#define HIFC_RPORT_NOT_OFFLOADED(prnt_qinfo) \ + ((prnt_qinfo)->offload_state != HIFC_QUEUE_STATE_OFFLOADED) +#define HIFC_RPORT_FLUSH_NOT_NEEDED(prnt_qinfo)\ + (((prnt_qinfo)->offload_state == HIFC_QUEUE_STATE_INITIALIZED) || \ + ((prnt_qinfo)->offload_state == HIFC_QUEUE_STATE_OFFLOADING) || \ + ((prnt_qinfo)->offload_state == HIFC_QUEUE_STATE_FREE)) +#define HIFC_CHECK_XID_MATCHED(sq_xid, sqe_xid) \ + (((sq_xid) & HIFC_CQM_XID_MASK) == ((sqe_xid) & HIFC_CQM_XID_MASK)) +#define HIFC_PORT_MODE_TGT (0) /* Port mode */ +#define HIFC_PORT_MODE_INI (1) +#define HIFC_PORT_MODE_BOTH (2) + +/********** Hardware Reserved Queue Info defines ***************************/ +#define HIFC_HRQI_SEQ_ID_MAX (255) +#define HIFC_HRQI_SEQ_INDEX_MAX (64) +#define HIFC_HRQI_SEQ_INDEX_SHIFT (6) +#define HIFC_HRQI_SEQ_SEPCIAL_ID (3) +#define HIFC_HRQI_SEQ_INVALID_ID (~0LL) + +/************************* OQID defines ***********************************/ + +#define HIFC_OQID_HOST_XID_OFFSET (5) +#define HIFC_OQID_HOST_RW_OFFSET (4) +#define HIFC_OQID_HOST_ST_OFFSET (2) +#define HIFC_OQID_HOST_OQID_LEN (11) +#define HIFC_OQID_HOST_READ_FROM_HOST (0UL) +#define HIFC_OQID_HOST_WRITE_TO_HOST (1) +#define HIFC_CPI_CHNL_ID_XOE_READ (1UL) +#define HIFC_CPI_CHNL_ID_XOE_WRITE (3UL) +#define HIFC_SERVICE_TYPE_FC_FCOE (2) +/********************* sdk config defines ***********************************/ +#define HIFC_CNTX_SIZE_256B 256 +#define HIFC_QUEUE_LINK_STYLE 0 +#define HIFC_PACKET_COS_FC_CMD 0 +#define HIFC_PACKET_COS_FC_DATA 1 +#define 
HIFC_DB_ARM_DISABLE 0 +#define HIFC_DMA_ATTR_OFST 0 +#define HIFC_PCIE_TEMPLATE 0 +#define HIFC_PCIE_RELAXED_ORDERING 1 +#define HIFC_OWNER_DRIVER_PRODUCT 1 +#define HIFC_CMDQE_BUFF_LEN_MAX 2040 +#define HIFC_CNTX_SIZE_T_256B 0 + +#define HIFC_OQID_IO_HOST_SET(xid, rw, cidx, vf_id, m, oqid) \ + { \ + oqid = (unsigned short)(((unsigned short)\ + ((xid) << HIFC_OQID_HOST_XID_OFFSET)) \ + | ((unsigned short)((rw) << HIFC_OQID_HOST_RW_OFFSET)) \ + | ((unsigned short)(HIFC_SERVICE_TYPE_FC_FCOE << \ + HIFC_OQID_HOST_ST_OFFSET)) | (cidx)); \ + oqid = (unsigned short)\ + (((unsigned short)(oqid & (0x7ff >> (m))))\ + | ((unsigned short)((vf_id) << \ + (HIFC_OQID_HOST_OQID_LEN - (m))))); \ + } + +#define HIFC_OQID_RD(xid, vf_id, m, oq_id) \ + HIFC_OQID_IO_HOST_SET(xid, HIFC_OQID_HOST_READ_FROM_HOST,\ + HIFC_CPI_CHNL_ID_XOE_READ, vf_id, m, oq_id) + +#define HIFC_OQID_WR(xid, vf_id, m, oq_id) \ + HIFC_OQID_IO_HOST_SET(xid, HIFC_OQID_HOST_WRITE_TO_HOST,\ + HIFC_CPI_CHNL_ID_XOE_WRITE, vf_id, m, oq_id) + +enum hifc_session_reset_mode_e { + HIFC_SESS_RST_DELETE_IO_ONLY = 1, + HIFC_SESS_RST_DELETE_CONN_ONLY = 2, + HIFC_SESS_RST_DELETE_IO_CONN_BOTH = 3, + HIFC_SESS_RST_MODE_BUTT +}; + +/* linkwqe */ +#define CQM_LINK_WQE_CTRLSL_VALUE 2 +#define CQM_LINK_WQE_LP_VALID 1 +#define CQM_LINK_WQE_LP_INVALID 0 + +/****************** ROOT SQ&RQ&CTX defines ****************************/ +struct nic_tx_doorbell { + union { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 srv_type : 5; + u32 cos : 3; + u32 c_flag : 1; + u32 rsvd0 : 5; + u32 queue_id : 10; + u32 pi_high : 8; +#else + u32 pi_high : 8; + u32 queue_id : 10; + u32 rsvd0 : 5; + u32 c_flag : 1; + u32 cos : 3; + u32 srv_type : 5; +#endif + } bs0; + u32 dw0; + }; + + u32 rsvd1; +}; + +struct hifc_qp_ctxt_header { + u16 num_queues; + u16 queue_type; + u32 addr_offset; +}; + +/* + * nic_sq_ctx_1822 table define + */ +struct hifc_sq_ctxt { + union { + struct sq_ctx_dw0 { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* whether generate CEQ */ + u32 ceq_arm : 1; + u32 rsvd1 : 7; + /* whether enable CEQ */ + u32 ceq_en : 1; + u32 global_sq_id : 10; + u32 ceq_num : 5; + u32 pkt_template : 6; + u32 rsvd2 : 2; +#else + u32 rsvd2 : 2; + u32 pkt_template : 6; + u32 ceq_num : 5; + u32 global_sq_id : 10; + /* whether enable CEQ */ + u32 ceq_en : 1; + u32 rsvd1 : 7; + /* whether generate CEQ */ + u32 ceq_arm : 1; +#endif + } sq_ctx_dw0; + u32 dw0; + }; + + union { + struct sq_ctx_dw1 { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 wqe_template : 6; + u32 rsvd3 : 2; + u32 owner : 1; + /* customer index */ + u32 ci : 12; + u32 tso_doing : 1; + /* indicate how many sge left in current tso wqe */ + u32 sge_num_left : 6; + /* number of sge processing */ + u32 processing_sge : 3; + u32 rsvd4 : 1; +#else + u32 rsvd4 : 1; + /* number of sge processing */ + u32 processing_sge : 3; + /* indicate how many sge left in current tso wqe */ + u32 sge_num_left : 6; + u32 tso_doing : 1; + /* customer index */ + u32 ci : 12; + u32 owner : 1; + u32 rsvd3 : 2; + u32 wqe_template : 6; +#endif + } sq_ctx_dw1; + u32 dw1; + }; + + union { + struct sq_ctx_dw2 { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd5 : 12; + /* the wqe page address that current ci point to */ + u32 ci_wqe_page_addr_hi : 20; +#else + /* the wqe page address that current ci point to */ + u32 ci_wqe_page_addr_hi : 20; + u32 rsvd5 : 12; +#endif + } sq_ctx_dw2; + u32 dw2; + }; + + u32 ci_wqe_page_addr_lo; + + union { + struct sq_ctx_dw4 { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* + * The minimum prefetch WQE cacheline 
number of this SQ + */ + u32 prefetch_min : 7; + /* + * The maximum prefetch WQE cacheline number of this SQ + */ + u32 prefetch_max : 11; + u32 prefetch_cache_threshold : 14; +#else + u32 prefetch_cache_threshold : 14; + /* + * The maximum prefetch WQE cacheline number of this SQ + */ + u32 prefetch_max : 11; + /* + * The minimum prefetch WQE cacheline number of this SQ + */ + u32 prefetch_min : 7; +#endif + } sq_ctx_dw4; + u32 dw4; + }; + + union { + struct sq_ctx_dw5 { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd6 : 31; + u32 prefetch_owner : 1; +#else + u32 prefetch_owner : 1; + u32 rsvd6 : 31; +#endif + } sq_ctx_dw5; + u32 dw5; + }; + + union { + struct sq_ctx_dw6 { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 prefetch_ci : 12; + u32 prefetch_ci_wqe_addr_hi : 20; +#else + u32 prefetch_ci_wqe_addr_hi : 20; + u32 prefetch_ci : 12; +#endif + } sq_ctx_dw6; + u32 dw6; + }; + + u32 prefetch_ci_wqe_addr_lo; + + union { + struct sq_ctx_dw8 { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* processed length of current seg */ + u32 processed_seg_len : 16; + u32 rsvd7 : 16; +#else + u32 rsvd7 : 16; + /* processed length of current seg */ + u32 processed_seg_len : 16; +#endif + } sq_ctx_dw8; + u32 dw8; + }; + + u32 qsf; + + union { + struct sq_ctx_dw10 { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd8 : 9; + /* CI CLA table address */ + u32 cla_addr_hi : 23; +#else + /* CI CLA table address */ + u32 cla_addr_hi : 23; + u32 rsvd8 : 9; +#endif + } sq_ctx_dw10; + u32 dw10; + }; + + u32 cla_addr_lo; +}; + +struct hifc_sq_ctxt_block { + struct hifc_qp_ctxt_header cmdq_hdr; + struct hifc_sq_ctxt sq_ctx[HIFC_ROOT_CFG_SQ_NUM_MAX]; +}; + +/* + * nic_rq_ctx_1822 table define + */ +struct hifc_rq_ctxt { + union { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 max_count : 10; + u32 cqe_tmpl : 6; + u32 pkt_tmpl : 6; + u32 wqe_tmpl : 6; + u32 psge_valid : 1; + u32 rsvd1 : 1; + u32 owner : 1; + u32 ceq_en : 1; +#else + u32 ceq_en : 1; + u32 owner : 1; + u32 rsvd1 : 1; + u32 psge_valid : 1; + u32 wqe_tmpl : 6; + u32 pkt_tmpl : 6; + u32 cqe_tmpl : 6; + u32 max_count : 10; +#endif + } bs; + u32 dw0; + }; + + union { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* + * Interrupt number that L2NIC engine tell SW if + * generate int instead of CEQ + */ + u32 int_num : 10; + u32 ceq_count : 10; + /* product index */ + u32 pi : 12; +#else + /* product index */ + u32 pi : 12; + u32 ceq_count : 10; + /* + * Interrupt number that L2NIC engine tell SW if + * generate int instead of CEQ + */ + u32 int_num : 10; +#endif + } bs0; + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* + * CEQ arm, L2NIC engine will clear it after send ceq, + * driver should set it by CMD Q after receive all pkt. + */ + u32 ceq_arm : 1; + u32 eq_id : 5; + u32 rsvd2 : 4; + u32 ceq_count : 10; + /* product index */ + u32 pi : 12; +#else + /* product index */ + u32 pi : 12; + u32 ceq_count : 10; + u32 rsvd2 : 4; + u32 eq_id : 5; + /* CEQ arm, L2NIC engine will clear it after send ceq, + * driver should set it by CMD Q after receive all pkt. 
+ */ + u32 ceq_arm : 1; +#endif + } bs1; + u32 dw1; + }; + + union { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* consumer index */ + u32 ci : 12; + /* WQE page address of current CI point to, high part */ + u32 ci_wqe_page_addr_hi : 20; +#else + /* WQE page address of current CI point to, high part */ + u32 ci_wqe_page_addr_hi : 20; + /* consumer index */ + u32 ci : 12; +#endif + } bs2; + u32 dw2; + }; + + /* WQE page address of current CI point to, low part */ + u32 ci_wqe_page_addr_lo; + + union { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 prefetch_min : 7; + u32 prefetch_max : 11; + u32 prefetch_cache_threshold : 14; +#else + u32 prefetch_cache_threshold : 14; + u32 prefetch_max : 11; + u32 prefetch_min : 7; +#endif + } bs3; + u32 dw3; + }; + + union { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd3 : 31; + /* ownership of WQE */ + u32 prefetch_owner : 1; +#else + /* ownership of WQE */ + u32 prefetch_owner : 1; + u32 rsvd3 : 31; +#endif + } bs4; + u32 dw4; + }; + + union { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 prefetch_ci : 12; + /* high part */ + u32 prefetch_ci_wqe_page_addr_hi : 20; +#else + /* high part */ + u32 prefetch_ci_wqe_page_addr_hi : 20; + u32 prefetch_ci : 12; +#endif + } bs5; + u32 dw5; + }; + + /* low part */ + u32 prefetch_ci_wqe_page_addr_lo; + /* host mem GPA, high part */ + u32 pi_gpa_hi; + /* host mem GPA, low part */ + u32 pi_gpa_lo; + + union { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 rsvd4 : 9; + u32 ci_cla_tbl_addr_hi : 23; +#else + u32 ci_cla_tbl_addr_hi : 23; + u32 rsvd4 : 9; +#endif + } bs6; + u32 dw6; + }; + + u32 ci_cla_tbl_addr_lo; +}; + +struct hifc_rq_ctxt_block { + struct hifc_qp_ctxt_header cmdq_hdr; + struct hifc_rq_ctxt rq_ctx[HIFC_ROOT_CFG_RQ_NUM_MAX]; +}; + +struct hifc_root_qsf_s { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + /* packet priority, engine pass pri to ucode */ + u32 pri : 3; + /* unicast flag, engine pass uc to ucode */ + u32 uc : 1; + /* sctp packet, engine pass sctp to ucode */ + u32 sctp : 1; + /* mss */ + u32 mss : 14; + /* when set, hi1822 calculates the tcp/udp check sum of the packet */ + u32 tcp_udp_cs : 1; + /* + * transmit segmentation offload is activated when the tso flag is set + */ + u32 tso : 1; + /* for udp packet, engine read the whole udp packet from host by 1 dma + * read, and ipsu calculate udp checksum, ucode do ip segment + */ + u32 ufo : 1; + /* payload offset. it is the start position to calculate tcp/udp + * checksum or sctp crc + */ + u32 payload_offset : 8; + /* reserved */ + u32 route_to_ucode : 2; +#else + /* reserved */ + u32 route_to_ucode : 2; + /* + * payload offset. 
it is the start position to calculate tcp/udp + * checksum or sctp crc + */ + u32 payload_offset : 8; + /* + * for udp packet, engine read the whole udp packet from host by 1 dma + * read, and ipsu calculate udp checksum, ucode do ip segment + */ + u32 ufo : 1; + /* + * transmit segmentation offload is activated when the tso flag is set + */ + u32 tso : 1; + /* when set, hi1822 calculates the tcp/udp check sum of the packet */ + u32 tcp_udp_cs : 1; + /* mss */ + u32 mss : 14; + /* sctp packet, engine pass sctp to ucode */ + u32 sctp : 1; + /* unicast flag, engine pass uc to ucode */ + u32 uc : 1; + /* packet priority, engine pass pri to ucode */ + u32 pri : 3; +#endif +}; + +struct hifc_root_db_addr_s { + unsigned long long phy_addr; + void __iomem *virt_map_addr; +}; + +/* send queue management structure */ +struct hifc_root_sq_info_s { + spinlock_t root_sq_spin_lock; + + unsigned short qid; + unsigned short max_qnum; + unsigned short pi; /* ring buffer Pi */ + unsigned short ci; /* ring buffer Ci */ + unsigned short owner; + unsigned short hardware_write_back_value; + unsigned short q_depth; + unsigned short wqe_bb_size; /* WQE Basic size */ + + char irq_name[HIFC_IRQ_NAME_MAX]; + unsigned int irq_id; + unsigned short msix_entry_idx; + + unsigned short *ci_addr; + dma_addr_t ci_dma_addr; + + unsigned long long cla_addr; + void *sq_handle; + struct hifc_root_db_addr_s direct_db; + struct hifc_root_db_addr_s normal_db; + unsigned int db_idx; + unsigned int global_qpn; + int in_flush; + void *root_info; +}; + +struct hifc_root_rq_info_s { + unsigned short qid; + unsigned short max_qnum; + unsigned short pi; + unsigned short ci; + unsigned short owner; + + unsigned short q_depth; + unsigned short q_mask; + unsigned short wqe_bb_size; + + char irq_name[HIFC_IRQ_NAME_MAX]; + unsigned int irq_id; + unsigned short msix_entry_idx; + + unsigned short *pi_vir_addr; + dma_addr_t pi_dma_addr; + + /* Root RQ Receive Buffer size and completion buff */ + unsigned int rqc_buff_size; + void *rq_completion_buff; + dma_addr_t rq_completion_dma; + unsigned int rq_rcv_buff_size; + void *rq_rcv_buff; + dma_addr_t rq_rcv_dma; + void *rq_handle; + + /* for queue context init */ + unsigned long long ci_cla_tbl_addr; + + unsigned int global_qpn; + struct tasklet_struct tasklet; + atomic_t flush_state; + + void *root_info; +}; + +struct hifc_root_info_s { + void *phba; + unsigned int sq_num; + unsigned int sq_ci_table_size; + void *virt_sq_ci_table_buff; + dma_addr_t sq_ci_table_dma; + void *sq_info; + + unsigned int rq_num; + unsigned int rq_pi_table_size; + void *virt_rq_pi_table_buff; + dma_addr_t rq_pi_table_dma; + void *rq_info; +}; + +/**************************** SCQ defines ********************************/ +struct hifc_scq_info_s { + struct cqm_queue_s *cqm_scq_info; + unsigned int wqe_num_per_buf; + unsigned int wqe_size; + /* 0-256, 1-512, 2-1k, 3-2k, 4-4k, 5-8k, 6-16k, 7-32k */ + unsigned int scqc_cq_depth; + unsigned short scqc_ci_type; + unsigned short valid_wqe_num; /* ScQ depth include link wqe */ + unsigned short ci; + unsigned short ci_owner; + + unsigned int queue_id; + unsigned int scqn; + char irq_name[HIFC_IRQ_NAME_MAX]; + unsigned short msix_entry_idx; + unsigned int irq_id; + struct tasklet_struct tasklet; + atomic_t flush_state; + + void *phba; + unsigned int reserved; + struct task_struct *delay_task; + int task_exit; + unsigned int intrmode; +}; + +/************************* SRQ depth ***********************************/ +struct hifc_srq_ctx_s { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + 
/* DW0 */ + unsigned long long last_rq_pmsn : 16; + unsigned long long cur_rqe_msn : 16; + unsigned long long cur_rqe_user_id : 16; + unsigned long long parity : 8; + unsigned long long rsvd0 : 2; + unsigned long long pcie_template : 6; + + /* DW1 */ + unsigned long long cur_rqe_gpa; + + /* DW2 */ + unsigned long long cur_sge_v : 1; + unsigned long long cur_sge_l : 1; + unsigned long long int_mode : 2; + unsigned long long ceqn_msix : 11; + unsigned long long cur_sge_remain_len : 17; + unsigned long long cur_sge_id : 4; + unsigned long long consant_sge_len : 17; + unsigned long long cur_wqe : 1; + unsigned long long pmsn_type : 1; + unsigned long long bdsl : 4; + unsigned long long cr : 1; + unsigned long long csl : 2; + unsigned long long cf : 1; + unsigned long long ctrl_sl : 1; + + /* DW3 */ + unsigned long long cur_sge_gpa; + + /* DW4 */ + unsigned long long cur_pmsn_gpa; + + /* DW5 */ + unsigned long long pre_fetch_max_msn : 16; + unsigned long long cqe_max_cnt : 8; + unsigned long long cur_cqe_cnt : 8; + unsigned long long arm_q : 1; + unsigned long long rsvd1 : 7; + unsigned long long cq_so_ro : 2; + unsigned long long cqe_dma_attr_idx : 6; + unsigned long long rq_so_ro : 2; + unsigned long long rqe_dma_attr_idx : 6; + unsigned long long rsvd2 : 1; + unsigned long long loop_o : 1; + unsigned long long ring : 1; + unsigned long long rsvd3 : 5; + +#else + /* DW0 */ + unsigned long long pcie_template : 6; + unsigned long long rsvd0 : 2; + unsigned long long parity : 8; + unsigned long long cur_rqe_user_id : 16; + unsigned long long cur_rqe_msn : 16; + unsigned long long last_rq_pmsn : 16; + + /* DW1 */ + unsigned long long cur_rqe_gpa; + + /* DW2 */ + unsigned long long ctrl_sl : 1; + unsigned long long cf : 1; + unsigned long long csl : 2; + unsigned long long cr : 1; + unsigned long long bdsl : 4; + unsigned long long pmsn_type : 1; + unsigned long long cur_wqe : 1; + unsigned long long consant_sge_len : 17; + unsigned long long cur_sge_id : 4; + unsigned long long cur_sge_remain_len : 17; + unsigned long long ceqn_msix : 11; + unsigned long long int_mode : 2; + unsigned long long cur_sge_l : 1; + unsigned long long cur_sge_v : 1; + + /* DW3 */ + unsigned long long cur_sge_gpa; + + /* DW4 */ + unsigned long long cur_pmsn_gpa; + + /* DW5 */ + unsigned long long rsvd3 : 5; + unsigned long long ring : 1; + unsigned long long loop_o : 1; + unsigned long long rsvd2 : 1; + unsigned long long rqe_dma_attr_idx : 6; + unsigned long long rq_so_ro : 2; + unsigned long long cqe_dma_attr_idx : 6; + unsigned long long cq_so_ro : 2; + unsigned long long rsvd1 : 7; + unsigned long long arm_q : 1; + unsigned long long cur_cqe_cnt : 8; + unsigned long long cqe_max_cnt : 8; + unsigned long long pre_fetch_max_msn : 16; + +#endif + + /* DW6~DW7 */ + unsigned long long rsvd4; + unsigned long long rsvd5; + +}; + +struct hifc_srq_buff_entry_s { + unsigned short buff_id; + void *buff_addr; + dma_addr_t buff_dma; +}; + +enum hifc_clean_state_e { + HIFC_CLEAN_DONE, + HIFC_CLEAN_DOING, + HIFC_CLEAN_BUTT +}; + +enum hifc_srq_type_e { + HIFC_SRQ_ELS = 1, + HIFC_SRQ_BUTT +}; + +struct hifc_srq_info_s { + enum hifc_srq_type_e srq_type; + + struct cqm_queue_s *cqm_srq_info; + /* Wqe number per buf, dont't inlcude link wqe */ + unsigned int wqe_num_per_buf; + unsigned int wqe_size; + /* valid wqe number, dont't include link wqe */ + unsigned int valid_wqe_num; + unsigned short pi; + unsigned short pi_owner; + unsigned short pmsn; + unsigned short ci; + unsigned short cmsn; + unsigned int srqn; + + dma_addr_t 
first_rqe_rcv_dma; + + struct hifc_srq_buff_entry_s *els_buff_entry_head; + struct buf_describe_s buff_list; + spinlock_t srq_spin_lock; + int spin_lock_init; + int enable; + enum hifc_clean_state_e state; + struct delayed_work del_work; + unsigned int del_retry_time; + void *phba; +}; + +/* + * The doorbell record keeps PI of WQE, which will be produced next time. + * The PI is 15 bits width o-bit + */ +struct hifc_db_record { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u64 rsvd0 : 32; + unsigned long long dump_pmsn : 16; + unsigned long long pmsn : 16; +#else + unsigned long long pmsn : 16; + unsigned long long dump_pmsn : 16; + u64 rsvd0 : 32; +#endif +}; + +/* + * The ci record keeps CI of WQE, which will be consumed next time. + * The ci is 15 bits width with 1 o-bit + */ +struct hifc_ci_record_s { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u64 rsvd0 : 32; + unsigned long long dump_cmsn : 16; + unsigned long long cmsn : 16; +#else + unsigned long long cmsn : 16; + unsigned long long dump_cmsn : 16; + u64 rsvd0 : 32; +#endif +}; + +/* The accumulate data in WQ header */ +struct hifc_accumulate { + u64 data_2_uc; + u64 data_2_drv; +}; + +/* The WQ header structure */ +struct hifc_wq_header_s { + struct hifc_db_record db_record; + struct hifc_ci_record_s ci_record; + struct hifc_accumulate soft_data; + +}; + +/* Link list Sq WqePage Pool */ +/* queue header struct */ +struct hifc_queue_header_s { + unsigned long long doorbell_record; + unsigned long long ci_record; + unsigned long long ulrsv1; + unsigned long long ulrsv2; +}; + +/* WPG-WQEPAGE, LLSQ-LINKED LIST SQ */ +struct hifc_sq_wqe_page_s { + struct list_head entry_wpg; + /* Wqe Page virtual addr */ + void *wpg_addr; + /* Wqe Page physical addr */ + unsigned long long wpg_phy_addr; +}; + +struct hifc_sq_wqe_page_pool_s { + unsigned int wpg_cnt; + unsigned int wpg_size; + unsigned int wqe_per_wpg; + + /* PCI DMA Pool */ + struct dma_pool *wpg_dma_pool; + struct hifc_sq_wqe_page_s *wpg_pool_addr; + struct list_head list_free_wpg_pool; + spinlock_t wpg_pool_lock; + atomic_t wpg_in_use; +}; + +#define HIFC_SQ_DEL_STAGE_TIMEOUT_MS (3 * 1000) +#define HIFC_SRQ_DEL_STAGE_TIMEOUT_MS (10 * 1000) +#define HIFC_SQ_WAIT_FLUSH_DONE_TIMEOUT_MS (10) +#define HIFC_SQ_WAIT_FLUSH_DONE_TIMEOUT_CNT (3) + +#define HIFC_SRQ_PROCESS_DELAY_MS (20) + +/* PLOGI parameters */ +struct hifc_plogi_coparams_s { + unsigned int seq_cnt : 1; + unsigned int ed_tov : 1; + unsigned int reserved : 14; + unsigned int tx_mfs : 16; + unsigned int ed_tov_timer_val; +}; + +struct hifc_delay_sqe_ctrl_info_s { + int valid; + unsigned int rport_index; + unsigned int time_out; + unsigned long long start_jiff; + unsigned int sid; + unsigned int did; + struct hifc_root_sqe_s sqe; +}; + +struct hifc_destroy_ctrl_info_s { + int valid; + unsigned int rport_index; + unsigned int time_out; + unsigned long long start_jiff; + struct unf_rport_info_s rport_info; +}; + +/* PARENT SQ Info */ +struct hifc_parent_sq_info_s { + void *phba; + + spinlock_t parent_sq_enqueue_lock; + atomic_t wqe_page_cnt; + unsigned int rport_index; + + unsigned int context_id; + + /* Fixed value,used for Doorbell */ + unsigned int sq_queue_id; + + /* When a session is offloaded, tile will return the CacheId to the + * driver,which is used for Doorbell + */ + unsigned int cache_id; + + /* service type, fc */ + unsigned int service_type; + + /* OQID */ + unsigned short oqid_rd; + unsigned short oqid_wr; + + unsigned int max_sqe_num; /* SQ depth */ + unsigned int wqe_num_per_buf; + unsigned int wqe_size; + + unsigned 
int wqe_offset; + unsigned short head_start_cmsn; + unsigned short head_end_cmsn; + unsigned short last_pmsn; + unsigned short last_pi_owner; + + unsigned int local_port_id; + unsigned int remote_port_id; + int port_in_flush; + int sq_in_sess_rst; + atomic_t sq_valid; + + void *queue_header_original; + struct hifc_queue_header_s *queue_header; + dma_addr_t queue_hdr_phy_addr_original; + dma_addr_t queue_hdr_phy_addr; + + /* Linked List SQ */ + struct list_head list_linked_list_sq; + + unsigned char vport_id; + struct delayed_work del_work; + struct delayed_work flush_done_tmo_work; + unsigned long long del_start_jiff; + dma_addr_t srq_ctx_addr; + atomic_t sq_cashed; + atomic_t fush_done_wait_cnt; + + struct hifc_plogi_coparams_s plogi_coparams; + + /* dif control info for immi */ + struct unf_dif_control_info_s sirt_dif_control; + + atomic_t sq_dbl_cnt; + atomic_t sq_wqe_cnt; + atomic_t sq_cqe_cnt; + atomic_t sqe_minus_cqe_cnt; + + struct hifc_delay_sqe_ctrl_info_s delay_sqe; + struct hifc_destroy_ctrl_info_s destroy_sqe; + atomic_t io_stat[HIFC_MAX_SQ_TASK_TYPE_CNT]; + +}; + +/* parent context doorbell */ +struct hifc_parent_sq_db_s { + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 service_type : 5; + u32 cos : 3; + u32 c : 1; + u32 arm : 1; + u32 cntx_size : 2; + u32 vport : 7; + u32 xid : 13; +#else + u32 xid : 13; + u32 vport : 7; + u32 cntx_size : 2; + u32 arm : 1; + u32 c : 1; + u32 cos : 3; + u32 service_type : 5; +#endif + } wd0; + + struct { +#if (__BYTE_ORDER__ == __BIG_ENDIAN__) + u32 qid : 4; + u32 sm_data : 20; + u32 pi_hi : 8; +#else + u32 pi_hi : 8; + u32 sm_data : 20; + u32 qid : 4; +#endif + } wd1; + +}; + +struct hifc_parent_cmd_scq_info_s { + unsigned int cqm_queue_id; + unsigned int local_queue_id; +}; + +struct hifc_parent_st_scq_info_s { + unsigned int cqm_queue_id; + unsigned int local_queue_id; +}; + +struct hifc_parent_els_srq_info_s { + unsigned int cqm_queue_id; + unsigned int local_queue_id; +}; + +enum hifc_parent_queue_state_e { + HIFC_QUEUE_STATE_INITIALIZED = 0, + HIFC_QUEUE_STATE_OFFLOADING = 1, + HIFC_QUEUE_STATE_OFFLOADED = 2, + HIFC_QUEUE_STATE_DESTROYING = 3, + HIFC_QUEUE_STATE_FREE = 4, + HIFC_QUEUE_STATE_BUTT +}; + +struct hifc_parent_ctx_s { + dma_addr_t parent_ctx; + /* Allocated by driver, Driver filled it when a session offload */ + void *virt_parent_ctx; + /* Allocated by CQM,used by Hardware */ + struct cqm_qpc_mpt_s *cqm_parent_ctx_obj; +}; + +struct hifc_parent_queue_info_s { + spinlock_t parent_queue_state_lock; + struct hifc_parent_ctx_s parent_ctx; + enum hifc_parent_queue_state_e offload_state; + struct hifc_parent_sq_info_s parent_sq_info; + /* Cmd Scq info which is assocaiated with parent queue */ + struct hifc_parent_cmd_scq_info_s parent_cmd_scq_info; + /* Sts Scq info which is assocaiated with parent queue */ + struct hifc_parent_st_scq_info_s parent_sts_scq_info; + /* ELS Srq info which is assocaiated with parent queue */ + unsigned char queue_vport_id; + struct hifc_parent_els_srq_info_s parent_els_srq_info; + unsigned char queue_data_cos; +}; + +struct hifc_parent_queue_mgr_s { + struct hifc_parent_queue_info_s parent_queues[UNF_HIFC_MAXRPORT_NUM]; + struct buf_describe_s parent_sq_buf_list; +}; + +struct hifc_get_global_base_qpn_s { + /* for new version interface */ + unsigned char status; + unsigned char version; + unsigned char rsvd0[6]; + + unsigned short func_id; + unsigned short base_qpn; +}; + +#define HIFC_SRQC_BUS_ROW 8 +#define HIFC_SRQC_BUS_COL 19 +#define HIFC_SQC_BUS_ROW 8 +#define HIFC_SQC_BUS_COL 13 +#define 
HIFC_HW_SCQC_BUS_ROW 6 +#define HIFC_HW_SCQC_BUS_COL 10 +#define HIFC_HW_SRQC_BUS_ROW 4 +#define HIFC_HW_SRQC_BUS_COL 15 +#define HIFC_SCQC_BUS_ROW 3 +#define HIFC_SCQC_BUS_COL 29 + +#define HIFC_QUEUE_INFO_BUS_NUM 4 +struct hifc_queue_info_bus_s { + unsigned long long bus[HIFC_QUEUE_INFO_BUS_NUM]; +}; + +unsigned int hifc_free_parent_resource(void *v_hba, + struct unf_rport_info_s *v_rport_info); +unsigned int hifc_alloc_parent_resource(void *v_hba, + struct unf_rport_info_s *v_rport_info); +unsigned int hifc_create_root_queues(void *v_hba); +void hifc_destroy_root_queues(void *v_hba); +unsigned int hifc_alloc_parent_queue_mgr(void *v_hba); +void hifc_free_parent_queue_mgr(void *v_hba); +unsigned int hifc_create_common_share_queues(void *v_hba); +void hifc_destroy_common_share_queues(void *v_hba); +unsigned int hifc_alloc_parent_sq_wqe_page_pool(void *v_hba); +void hifc_free_parent_sq_wqe_page_pool(void *v_hba); + +struct hifc_parent_queue_info_s *hifc_find_parent_queue_info_by_pkg( + void *v_hba, + struct unf_frame_pkg_s *v_pkg); +struct hifc_parent_sq_info_s *hifc_find_parent_sq_by_pkg( + void *v_hba, struct unf_frame_pkg_s *v_pkg); +struct hifc_parent_ctx_s *hifc_get_parnt_ctx_virt_addr_by_pkg( + void *v_hba, + struct unf_frame_pkg_s *v_pkg); +unsigned int hifc_get_parent_ctx_xid_by_pkg(void *v_hba, + struct unf_frame_pkg_s *v_pkg); + +unsigned int hifc_root_sq_enqueue(void *v_hba, + struct hifc_root_sqe_s *v_sqe); +void hifc_process_root_rqe(unsigned long v_rq_info); + +unsigned int hifc_root_cmdq_enqueue(void *v_hba, + union hifc_cmdqe_u *v_cmd_qe, + unsigned short v_cmd_len); + +void hifc_process_scq_cqe(unsigned long scq_info); +unsigned int hifc_process_scq_cqe_entity(unsigned long v_scq_info, + unsigned int proc_cnt); + +void hifc_post_els_srq_wqe(struct hifc_srq_info_s *v_srq_info, + unsigned short buf_id); +void hifc_process_aeqe(void *v_srv_handle, unsigned char evt_type, u64 evt_val); + +unsigned int hifc_parent_sq_enqueue(struct hifc_parent_sq_info_s *v_sq, + struct hifcoe_sqe_s *v_sqe); +void hifc_free_sq_wqe_page(struct hifc_parent_sq_info_s *v_sq, + unsigned int cur_cmsn); +unsigned int hifc_reclaim_sq_wqe_page(void *v_hba, union hifcoe_scqe_u *v_scqe); + +void hifc_set_root_sq_flush_state(void *v_hba, int in_flush); +void hifc_set_rport_flush_state(void *v_hba, int in_flush); +unsigned int hifc_clear_fetched_sq_wqe(void *v_hba); +unsigned int hifc_clear_pending_sq_wqe(void *v_hba); + +void hifc_free_parent_queues(void *v_hba); +void hifc_enable_queues_dispatch(void *v_hba); +void hifc_queue_pre_process(void *v_hba, int v_clean); +void hifc_free_parent_queue_info( + void *v_hba, + struct hifc_parent_queue_info_s *v_parent_queue_info); +unsigned int hifc_send_session_rst_cmd( + void *v_hba, + struct hifc_parent_queue_info_s *v_parent_queue_info, + unsigned int v_mode); +void hifc_build_session_rst_wqe(void *v_hba, + struct hifc_parent_sq_info_s *v_sq, + struct hifcoe_sqe_s *v_sqe, + enum hifc_session_reset_mode_e v_mode, + unsigned int scqn); + +unsigned int hifc_rport_session_rst(void *v_hba, + struct unf_rport_info_s *v_rport_info); +unsigned int hifc_get_rport_maped_cmd_scqn(void *v_hba, + unsigned int rport_index); +unsigned int hifc_get_rport_maped_sts_scqn(void *v_hba, + unsigned int rport_index); + +void hifc_destroy_srq(void *v_hba); +unsigned int hifc_push_delay_sqe( + void *v_hba, + struct hifc_parent_queue_info_s *v_offload_parent_queue, + struct hifc_root_sqe_s *v_sqe, + struct unf_frame_pkg_s *v_pkg); + +void hifc_push_destroy_parent_queue_sqe( + void *v_hba, 
+ struct hifc_parent_queue_info_s *v_offload_parent_queue, + struct unf_rport_info_s *v_rport_info); +void hifc_pop_destroy_parent_queue_sqe( + void *v_hba, + struct hifc_destroy_ctrl_info_s *v_destroy_sqe_info); +struct hifc_parent_queue_info_s *hifc_find_offload_parent_queue( + void *v_hba, + unsigned int v_local_id, + unsigned int v_remote_id, + unsigned int v_rport_index); + +unsigned int hifc_flush_ini_resp_queue(void *v_hba); +void hifc_rcvd_els_from_srq_time_out(struct work_struct *work); +#endif diff --git a/drivers/scsi/huawei/hifc/unf_lport.c b/drivers/scsi/huawei/hifc/unf_lport.c new file mode 100644 index 000000000000..09986f177fcc --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_lport.c @@ -0,0 +1,1129 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#include "unf_log.h" +#include "unf_common.h" +#include "unf_event.h" +#include "unf_lport.h" +#include "unf_rport.h" +#include "unf_exchg.h" +#include "unf_service.h" +#include "unf_portman.h" +#include "unf_npiv.h" + +static void unf_lport_timeout(struct work_struct *work); + +void unf_cmmark_dirty_mem(struct unf_lport_s *v_lport, + enum unf_lport_dirty_flag_e v_etype) +{ + UNF_CHECK_VALID(0x1801, UNF_TRUE, v_lport, return); + + v_lport->dirty_flag |= v_etype; +} + +unsigned int unf_init_lport_route(struct unf_lport_s *v_lport) +{ + int ret = 0; + + UNF_CHECK_VALID(0x1802, UNF_TRUE, + v_lport, return UNF_RETURN_ERROR); + + /* Init L_Port route work */ + INIT_DELAYED_WORK(&v_lport->route_timer_work, unf_lport_route_work); + + /* Delay route work */ + ret = queue_delayed_work( + unf_work_queue, + &v_lport->route_timer_work, + (unsigned long)msecs_to_jiffies(UNF_LPORT_POLL_TIMER)); + if (unlikely(ret == UNF_FALSE)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_WARN, + "[warn]Port(0x%x) schedule route work failed", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + return unf_lport_refinc(v_lport); +} + +void unf_destroy_lport_route(struct unf_lport_s *v_lport) +{ + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x1803, UNF_TRUE, v_lport, return); + + /* Cancel (route timer) delay work */ + UNF_DELAYED_WORK_SYNC(ret, v_lport->port_id, + &v_lport->route_timer_work, + "Route Timer work"); + if (ret == RETURN_OK) { + /* Corresponding to ADD operation */ + unf_lport_ref_dec(v_lport); + } + + v_lport->destroy_step = UNF_LPORT_DESTROY_STEP_2_CLOSE_ROUTE; +} + +static void unf_lport_config(struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x1816, UNF_TRUE, v_lport, return); + + INIT_DELAYED_WORK(&v_lport->retry_work, unf_lport_timeout); + + v_lport->max_retry_count = UNF_MAX_RETRY_COUNT; /* 3 */ + v_lport->retries = 0; +} + +void unf_init_portparms(struct unf_lport_s *v_lport) +{ + INIT_LIST_HEAD(&v_lport->list_vports_head); + INIT_LIST_HEAD(&v_lport->list_intergrad_vports); + INIT_LIST_HEAD(&v_lport->list_destroy_vports); + INIT_LIST_HEAD(&v_lport->entry_lport); + spin_lock_init(&v_lport->lport_state_lock); + + v_lport->max_frame_size = max_frame_size; + v_lport->ed_tov = UNF_DEFAULT_EDTOV; + v_lport->ra_tov = UNF_DEFAULT_RATOV; + v_lport->rr_tov = UNF_DEFAULT_RRTOV; + v_lport->fabric_node_name = 0; + v_lport->b_priority = UNF_PRIORITY_DISABLE; + v_lport->b_port_dir_exchange = UNF_FALSE; + /* Delay (retry) work init */ + unf_lport_config(v_lport); + + unf_set_lport_state(v_lport, UNF_LPORT_ST_ONLINE); /* online */ + + v_lport->link_up = UNF_PORT_LINK_DOWN; + v_lport->b_port_removing = UNF_FALSE; + 
v_lport->lport_free_completion = NULL; + v_lport->last_tx_fault_jif = 0; + v_lport->enhanced_features = 0; + v_lport->destroy_step = INVALID_VALUE32; + v_lport->dirty_flag = 0; + v_lport->b_switch_state = UNF_FALSE; + v_lport->b_bbscn_support = UNF_FALSE; + + v_lport->en_start_work_state = UNF_START_WORK_STOP; + v_lport->sfp_power_fault_count = 0; + v_lport->sfp_9545_fault_count = 0; + + atomic_set(&v_lport->port_no_operater_flag, UNF_LPORT_NORMAL); + atomic_set(&v_lport->lport_ref_cnt, 0); + atomic_set(&v_lport->scsi_session_add_success, 0); + atomic_set(&v_lport->scsi_session_add_failed, 0); + atomic_set(&v_lport->scsi_session_del_success, 0); + atomic_set(&v_lport->scsi_session_del_failed, 0); + atomic_set(&v_lport->add_start_work_failed, 0); + atomic_set(&v_lport->add_closing_work_failed, 0); + atomic_set(&v_lport->alloc_scsi_id, 0); + atomic_set(&v_lport->resume_scsi_id, 0); + atomic_set(&v_lport->reuse_scsi_id, 0); + atomic_set(&v_lport->device_alloc, 0); + atomic_set(&v_lport->device_destroy, 0); + atomic_set(&v_lport->session_loss_tmo, 0); + + atomic64_set(&v_lport->exchg_index, 1); + atomic_inc(&v_lport->lport_ref_cnt); + atomic_set(&v_lport->err_code_obtain_freq, 0); + + memset(&v_lport->link_service_info, 0, + sizeof(struct unf_link_service_collect_s)); + memset(&v_lport->err_code_sum, 0, sizeof(struct unf_err_code_s)); +} + +void unf_reset_lport_params(struct unf_lport_s *v_lport) +{ + struct unf_lport_s *lport = v_lport; + + UNF_CHECK_VALID(0x1804, UNF_TRUE, v_lport, return); + + lport->link_up = UNF_PORT_LINK_DOWN; + lport->nport_id = 0; /* Need do FLOGI again to clear N_Port_ID */ + lport->max_frame_size = max_frame_size; + lport->ed_tov = UNF_DEFAULT_EDTOV; + lport->ra_tov = UNF_DEFAULT_RATOV; + lport->rr_tov = UNF_DEFAULT_RRTOV; + lport->fabric_node_name = 0; +} + +static enum unf_lport_login_state_e unf_lport_stat_online( + enum unf_lport_login_state_e old_state, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_LINK_UP: + /* EVENT_LINK_UP --->>> ST_LINK_UP */ + next_state = UNF_LPORT_ST_LINK_UP; + break; + + case UNF_EVENT_LPORT_NORMAL_ENTER: + /* EVENT_NORMAL_ENTER --->>> ST_INITIAL */ + next_state = UNF_LPORT_ST_INITIAL; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +static enum unf_lport_login_state_e unf_lport_stat_initial( + enum unf_lport_login_state_e old_state, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_LINK_UP: + /* EVENT_LINK_UP --->>> ST_LINK_UP */ + next_state = UNF_LPORT_ST_LINK_UP; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +static enum unf_lport_login_state_e unf_lport_stat_linkup( + enum unf_lport_login_state_e old_state, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_NORMAL_ENTER: + /* EVENT_NORMAL_ENTER --->>> FLOGI_WAIT */ + next_state = UNF_LPORT_ST_FLOGI_WAIT; + break; + + case UNF_EVENT_LPORT_READY: + /* EVENT_READY --->>> ST_READY */ + next_state = UNF_LPORT_ST_READY; + break; + + case UNF_EVENT_LPORT_LINK_DOWN: + /* EVENT_LINK_DOWN --->>> ST_INITIAL */ + next_state = UNF_LPORT_ST_INITIAL; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +static enum unf_lport_login_state_e unf_lport_stat_flogi_wait( + enum unf_lport_login_state_e 
old_state, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_REMOTE_ACC: + /* EVENT_REMOTE_ACC --->>> ST_PLOGI_WAIT */ + next_state = UNF_LPORT_ST_PLOGI_WAIT; + break; + + case UNF_EVENT_LPORT_READY: + /* EVENT_READY --->>> ST_READY */ + next_state = UNF_LPORT_ST_READY; + break; + + case UNF_EVENT_LPORT_REMOTE_TIMEOUT: + /* EVENT_REMOTE_TIMEOUT --->>> ST_LOGO */ + next_state = UNF_LPORT_ST_LOGO; + break; + + case UNF_EVENT_LPORT_LINK_DOWN: + /* EVENT_LINK_DOWN --->>> ST_INITIAL */ + next_state = UNF_LPORT_ST_INITIAL; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +static enum unf_lport_login_state_e unf_lport_stat_plogi_wait( + enum unf_lport_login_state_e old_state, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_REMOTE_ACC: + /* EVENT_REMOTE_ACC --->>> ST_RFT_ID_WAIT */ + next_state = UNF_LPORT_ST_RFT_ID_WAIT; + break; + + case UNF_EVENT_LPORT_REMOTE_TIMEOUT: + /* EVENT_TIMEOUT --->>> ST_LOGO */ + next_state = UNF_LPORT_ST_LOGO; + break; + + case UNF_EVENT_LPORT_LINK_DOWN: + /* EVENT_LINK_DOWN --->>> ST_INITIAL */ + next_state = UNF_LPORT_ST_INITIAL; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +static enum unf_lport_login_state_e unf_lport_stat_rftid_wait( + enum unf_lport_login_state_e old_state, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_REMOTE_ACC: + /* EVENT_REMOTE_ACC --->>> ST_RFF_ID_WAIT */ + next_state = UNF_LPORT_ST_RFF_ID_WAIT; + break; + + case UNF_EVENT_LPORT_REMOTE_TIMEOUT: + /* EVENT_TIMEOUT --->>> ST_LOGO */ + next_state = UNF_LPORT_ST_LOGO; + break; + + case UNF_EVENT_LPORT_LINK_DOWN: + /* EVENT_LINK_DOWN --->>> ST_INITIAL */ + next_state = UNF_LPORT_ST_INITIAL; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +static enum unf_lport_login_state_e unf_lport_stat_rffid_wait( + enum unf_lport_login_state_e old_state, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_REMOTE_ACC: + /* EVENT_REMOTE_ACC --->>> ST_SCR_WAIT */ + next_state = UNF_LPORT_ST_SCR_WAIT; + break; + + case UNF_EVENT_LPORT_REMOTE_TIMEOUT: + /* EVENT_TIMEOUT --->>> ST_LOGO */ + next_state = UNF_LPORT_ST_LOGO; + break; + + case UNF_EVENT_LPORT_LINK_DOWN: + /* EVENT_LINK_DOWN --->>> ST_INITIAL */ + next_state = UNF_LPORT_ST_INITIAL; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +static enum unf_lport_login_state_e unf_lport_state_scr_wait( + enum unf_lport_login_state_e old_state, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_REMOTE_ACC: + /* EVENT_REMOTE_ACC --->>> ST_READY */ + next_state = UNF_LPORT_ST_READY; + break; + + case UNF_EVENT_LPORT_REMOTE_TIMEOUT: + /* EVENT_TIMEOUT --->>> ST_LOGO */ + next_state = UNF_LPORT_ST_LOGO; + break; + + case UNF_EVENT_LPORT_LINK_DOWN: + /* EVENT_LINK_DOWN --->>> ST_INITIAL */ + next_state = UNF_LPORT_ST_INITIAL; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +static enum unf_lport_login_state_e unf_lport_state_logo( + enum unf_lport_login_state_e old_state, + enum unf_lport_event_e 
event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_NORMAL_ENTER: + /* EVENT_NORMAL_ENTER --->>> ST_OFFLINE */ + next_state = UNF_LPORT_ST_OFFLINE; + break; + + case UNF_EVENT_LPORT_LINK_DOWN: + /* EVENT_LINK_DOWN --->>> ST_INITIAL */ + next_state = UNF_LPORT_ST_INITIAL; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +static enum unf_lport_login_state_e unf_lport_state_offline( + enum unf_lport_login_state_e old_state, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_ONLINE: + /* EVENT_ONLINE --->>> ST_ONLINE */ + next_state = UNF_LPORT_ST_ONLINE; + break; + + case UNF_EVENT_LPORT_RESET: + /* EVENT_RESET --->>> ST_RESET */ + next_state = UNF_LPORT_ST_RESET; + break; + + case UNF_EVENT_LPORT_LINK_DOWN: + /* EVENT_LINK_DOWN --->>> ST_INITIAL */ + next_state = UNF_LPORT_ST_INITIAL; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +static enum unf_lport_login_state_e unf_lport_state_reset( + enum unf_lport_login_state_e old_state, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_NORMAL_ENTER: + /* EVENT_NORMAL_ENTER --->>> ST_INITIAL */ + next_state = UNF_LPORT_ST_INITIAL; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +static enum unf_lport_login_state_e unf_lport_state_ready( + enum unf_lport_login_state_e old_state, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + switch (event) { + case UNF_EVENT_LPORT_LINK_DOWN: + /* EVENT_LINK_DOWN --->>> ST_INITIAL */ + next_state = UNF_LPORT_ST_INITIAL; + break; + + case UNF_EVENT_LPORT_RESET: + /* EVENT_RESET --->>> ST_RESET */ + next_state = UNF_LPORT_ST_RESET; + break; + + case UNF_EVENT_LPORT_OFFLINE: + /* EVENT_OFFLINE --->>> ST_LOGO */ + next_state = UNF_LPORT_ST_LOGO; + break; + + default: + next_state = old_state; + break; + } + + return next_state; +} + +void unf_lport_stat_ma(struct unf_lport_s *v_lport, + enum unf_lport_event_e event) +{ + enum unf_lport_login_state_e old_state = UNF_LPORT_ST_ONLINE; + enum unf_lport_login_state_e next_state = UNF_LPORT_ST_ONLINE; + + UNF_CHECK_VALID(0x1805, UNF_TRUE, v_lport, return); + + old_state = v_lport->en_states; + switch (v_lport->en_states) { + case UNF_LPORT_ST_ONLINE: + next_state = unf_lport_stat_online(old_state, event); + break; + + case UNF_LPORT_ST_INITIAL: + next_state = unf_lport_stat_initial(old_state, event); + break; + + case UNF_LPORT_ST_LINK_UP: + next_state = unf_lport_stat_linkup(old_state, event); + break; + + case UNF_LPORT_ST_FLOGI_WAIT: + next_state = unf_lport_stat_flogi_wait(old_state, event); + break; + + case UNF_LPORT_ST_PLOGI_WAIT: + next_state = unf_lport_stat_plogi_wait(old_state, event); + break; + + case UNF_LPORT_ST_RFT_ID_WAIT: + next_state = unf_lport_stat_rftid_wait(old_state, event); + break; + + case UNF_LPORT_ST_RFF_ID_WAIT: + next_state = unf_lport_stat_rffid_wait(old_state, event); + break; + + case UNF_LPORT_ST_SCR_WAIT: + next_state = unf_lport_state_scr_wait(old_state, event); + break; + + case UNF_LPORT_ST_LOGO: + next_state = unf_lport_state_logo(old_state, event); + break; + + case UNF_LPORT_ST_OFFLINE: + next_state = unf_lport_state_offline(old_state, event); + break; + + case UNF_LPORT_ST_RESET: + next_state = 
unf_lport_state_reset(old_state, event); + break; + + case UNF_LPORT_ST_READY: + next_state = unf_lport_state_ready(old_state, event); + break; + + default: + next_state = old_state; + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) hold state(0x%x)", + v_lport->port_id, v_lport->en_states); + break; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) with old state(0x%x) event(0x%x) next state(0x%x)", + v_lport->port_id, old_state, event, next_state); + + unf_set_lport_state(v_lport, next_state); +} + +unsigned int unf_init_lport_mgr_temp(struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x1806, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + v_lport->lport_mgr_temp.pfn_unf_vport_get_free_and_init = NULL; + v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_vp_index = + unf_lookup_vport_by_vp_index; + v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_port_id = + unf_lookup_vport_by_port_id; + v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_did = + unf_lookup_vport_by_did; + v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_wwpn = + unf_lookup_vport_by_wwpn; + v_lport->lport_mgr_temp.pfn_unf_vport_remove = unf_vport_remove; + return RETURN_OK; +} + +void unf_release_lport_mgr_temp(struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x1807, UNF_TRUE, v_lport, return); + + memset(&v_lport->lport_mgr_temp, 0, + sizeof(struct unf_cm_lport_template_s)); + v_lport->destroy_step = UNF_LPORT_DESTROY_STEP_9_DESTROY_LPORT_MG_TMP; +} + +unsigned int unf_lport_retry_flogi(struct unf_lport_s *v_lport) +{ + struct unf_rport_s *rport = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x1808, UNF_TRUE, + v_lport, return UNF_RETURN_ERROR); + + /* Get (new) R_Port */ + rport = unf_get_rport_by_nport_id(v_lport, UNF_FC_FID_FLOGI); + rport = unf_get_safe_rport(v_lport, rport, + UNF_RPORT_REUSE_ONLY, UNF_FC_FID_FLOGI); + if (unlikely(!rport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) allocate RPort failed", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + /* Check L_Port state */ + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + if (v_lport->en_states != UNF_LPORT_ST_FLOGI_WAIT) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) no need to retry FLOGI with state(0x%x)", + v_lport->port_id, v_lport->en_states); + + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + return RETURN_OK; + } + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = UNF_FC_FID_FLOGI; + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Send FLOGI or FDISC */ + if (v_lport != v_lport->root_lport) { + ret = unf_send_fdisc(v_lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) send FDISC failed", + v_lport->port_id); + + /* Do L_Port recovery */ + unf_lport_error_recovery(v_lport); + } + } else { + ret = unf_send_flogi(v_lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE( + UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) send FLOGI failed\n", + v_lport->port_id); + + /* Do L_Port recovery */ + unf_lport_error_recovery(v_lport); + } + } + + return ret; +} + +unsigned int unf_lport_name_server_register( + struct unf_lport_s *v_lport, + enum unf_lport_login_state_e states) +{ + struct unf_rport_s *rport = NULL; + unsigned long 
flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x1809, UNF_TRUE, + v_lport, return UNF_RETURN_ERROR); + + /* Get (safe) R_Port 0xfffffc */ + rport = unf_get_rport_by_nport_id(v_lport, UNF_FC_FID_DIR_SERV); + rport = unf_get_safe_rport(v_lport, rport, UNF_RPORT_REUSE_ONLY, + UNF_FC_FID_DIR_SERV); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) allocate RPort failed", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + /* Update R_Port & L_Port state */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = UNF_FC_FID_DIR_SERV; /* 0xfffffc */ + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + unf_lport_stat_ma(v_lport, UNF_EVENT_LPORT_NORMAL_ENTER); + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + switch (states) { + /* RFT_ID */ + case UNF_LPORT_ST_RFT_ID_WAIT: + ret = unf_send_rft_id(v_lport, rport); + break; + + /* RFF_ID */ + case UNF_LPORT_ST_RFF_ID_WAIT: + ret = unf_send_rff_id(v_lport, rport); + break; + + /* SCR */ + case UNF_LPORT_ST_SCR_WAIT: + ret = unf_send_scr(v_lport, NULL); + break; + + /* PLOGI */ + case UNF_LPORT_ST_PLOGI_WAIT: + default: + spin_lock_irqsave(&rport->rport_state_lock, flag); + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + ret = unf_send_plogi(v_lport, rport); + break; + } + + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) register fabric(0xfffffc) failed", + v_lport->nport_id); + + /* Do L_Port recovery */ + unf_lport_error_recovery(v_lport); + } + + return ret; +} + +unsigned int unf_lport_enter_sns_logo(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + struct unf_rport_s *rport = NULL; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x1810, UNF_TRUE, + v_lport, return UNF_RETURN_ERROR); + + if (!v_rport) { + rport = unf_get_rport_by_nport_id(v_lport, + UNF_FC_FID_DIR_SERV); + } else { + rport = v_rport; + } + + if (!rport) { + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + unf_lport_stat_ma(v_lport, UNF_EVENT_LPORT_NORMAL_ENTER); + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + return RETURN_OK; + } + + /* Update L_Port & R_Port state */ + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + unf_lport_stat_ma(v_lport, UNF_EVENT_LPORT_NORMAL_ENTER); + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + spin_lock_irqsave(&rport->rport_state_lock, flag); + unf_rport_state_ma(rport, UNF_EVENT_RPORT_LOGO); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Do R_Port LOGO state */ + unf_rport_enter_logo(v_lport, rport); + + return ret; +} + +void unf_lport_enter_sns_plogi(struct unf_lport_s *v_lport) +{ + /* Fabric or Public Loop Mode: Login with Name server */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = NULL; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x1811, UNF_TRUE, v_lport, return); + + /* Get (safe) R_Port 0xfffffc */ + rport = unf_get_rport_by_nport_id(lport, UNF_FC_FID_DIR_SERV); + if (rport) { + /* for port swap: Delete old R_Port if necessary */ + if (rport->local_nport_id != v_lport->nport_id) { + unf_rport_immediate_linkdown(v_lport, rport); + rport = NULL; + } + } + + rport = unf_get_safe_rport(v_lport, rport, + UNF_RPORT_REUSE_ONLY, UNF_FC_FID_DIR_SERV); + if (!rport) { 
+ UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) allocate RPort failed", + v_lport->port_id); + + unf_lport_error_recovery(lport); + return; + } + + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = UNF_FC_FID_DIR_SERV; /* 0xfffffc */ + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Send PLOGI to Fabric(0xfffffc) */ + ret = unf_send_plogi(lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) send PLOGI to name server failed", + v_lport->port_id); + + unf_lport_error_recovery(lport); + } +} + +int unf_get_port_params(void *v_argin, void *v_argout) +{ + struct unf_lport_s *lport = (struct unf_lport_s *)v_argin; + struct unf_low_level_port_mgr_op_s *port_mg = NULL; + struct unf_port_params_s port_params = { 0 }; + int ret = RETURN_OK; + + UNF_REFERNCE_VAR(v_argout); + UNF_CHECK_VALID(0x1812, UNF_TRUE, + v_argin, return UNF_RETURN_ERROR); + + port_mg = &lport->low_level_func.port_mgr_op; + if (!port_mg->pfn_ll_port_config_get) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT, UNF_WARN, + "[warn]Port(0x%x) low level port_config_get function is NULL", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, UNF_INFO, + "[warn]Port(0x%x) get parameters with default:R_A_TOV(%d) E_D_TOV(%d)", + lport->port_id, UNF_DEFAULT_FABRIC_RATOV, UNF_DEFAULT_EDTOV); + + port_params.ra_tov = UNF_DEFAULT_FABRIC_RATOV; + port_params.ed_tov = UNF_DEFAULT_EDTOV; + + /* Update parameters with Fabric mode */ + if ((lport->en_act_topo == UNF_ACT_TOP_PUBLIC_LOOP) || + (lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC)) { + lport->ra_tov = port_params.ra_tov; + lport->ed_tov = port_params.ed_tov; + } + + return ret; +} + +unsigned int unf_lport_enter_flogi(struct unf_lport_s *v_lport) +{ + struct unf_rport_s *rport = NULL; + struct unf_cm_event_report *event = NULL; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int nport_id = 0; + + UNF_CHECK_VALID(0x1813, UNF_TRUE, + v_lport, return UNF_RETURN_ERROR); + + /* Get (safe) R_Port */ + nport_id = UNF_FC_FID_FLOGI; /* 0xfffffe */ + rport = unf_get_rport_by_nport_id(v_lport, UNF_FC_FID_FLOGI); + + rport = unf_get_safe_rport(v_lport, rport, + UNF_RPORT_REUSE_ONLY, nport_id); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) allocate RPort failed", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + /* Updtae L_Port state */ + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + /* LPort: LINK UP --> FLOGI WAIT */ + unf_lport_stat_ma(v_lport, UNF_EVENT_LPORT_NORMAL_ENTER); + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + /* Update R_Port N_Port_ID */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = UNF_FC_FID_FLOGI; /* 0xfffffe */ + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + event = unf_get_one_event_node(v_lport); + if (event) { + event->lport = v_lport; + event->event_asy_flag = UNF_EVENT_ASYN; + /* NULL for timer */ + event->pfn_unf_event_task = unf_get_port_params; + event->para_in = (void *)v_lport; + unf_post_one_event_node(v_lport, event); + } + + if (v_lport != v_lport->root_lport) { + /* for NPIV */ + ret = unf_send_fdisc(v_lport, rport); + if (ret != RETURN_OK) + /* Do L_Port recovery */ + unf_lport_error_recovery(v_lport); + } else { + /* for Physical Port */ + ret = unf_send_flogi(v_lport, rport); + if (ret != 
RETURN_OK) + /* Do L_Port recovery */ + unf_lport_error_recovery(v_lport); + } + + return ret; +} + +void unf_set_lport_state(struct unf_lport_s *v_lport, + enum unf_lport_login_state_e states) +{ + UNF_CHECK_VALID(0x1814, UNF_TRUE, v_lport, return); + if (states != v_lport->en_states) { + /* Reset L_Port retry count */ + v_lport->retries = 0; + } + + v_lport->en_states = states; +} + +static void unf_lport_timeout(struct work_struct *work) +{ + struct unf_lport_s *lport = NULL; + enum unf_lport_login_state_e state = UNF_LPORT_ST_READY; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x1815, UNF_TRUE, work, return); + lport = container_of(work, struct unf_lport_s, retry_work.work); + + spin_lock_irqsave(&lport->lport_state_lock, flag); + state = lport->en_states; + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) is timeout with state(0x%x)", + lport->port_id, state); + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + switch (state) { + /* FLOGI retry */ + case UNF_LPORT_ST_FLOGI_WAIT: + (void)unf_lport_retry_flogi(lport); + break; + + case UNF_LPORT_ST_PLOGI_WAIT: + case UNF_LPORT_ST_RFT_ID_WAIT: + case UNF_LPORT_ST_RFF_ID_WAIT: + case UNF_LPORT_ST_SCR_WAIT: + (void)unf_lport_name_server_register(lport, state); + break; + + /* Send LOGO External */ + case UNF_LPORT_ST_LOGO: + break; + + /* Do nothing */ + case UNF_LPORT_ST_OFFLINE: + case UNF_LPORT_ST_READY: + case UNF_LPORT_ST_RESET: + case UNF_LPORT_ST_ONLINE: + case UNF_LPORT_ST_INITIAL: + case UNF_LPORT_ST_LINK_UP: + + lport->retries = 0; + break; + default: + break; + } + + unf_lport_ref_dec_to_destroy(lport); +} + +void unf_lport_error_recovery(struct unf_lport_s *v_lport) +{ + unsigned long delay = 0; + unsigned long flag = 0; + int ret = 0; + + UNF_CHECK_VALID(0x1817, UNF_TRUE, v_lport, return); + + if (unlikely(unf_lport_refinc(v_lport) != RETURN_OK)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) is removing and no need process", + v_lport->port_id); + return; + } + + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + + /* Port State: removing */ + if (v_lport->b_port_removing == UNF_TRUE) { + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) is removing and no need process", + v_lport->port_id); + + unf_lport_ref_dec_to_destroy(v_lport); + return; + } + + /* Port State: offline */ + if (v_lport->en_states == UNF_LPORT_ST_OFFLINE) { + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) is offline and no need process", + v_lport->port_id); + + unf_lport_ref_dec_to_destroy(v_lport); + return; + } + + /* Queue work state check */ + if (delayed_work_pending(&v_lport->retry_work)) { + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + unf_lport_ref_dec_to_destroy(v_lport); + return; + } + + /* Do retry operation */ + if (v_lport->retries < v_lport->max_retry_count) { + v_lport->retries++; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x_0x%x) enter recovery and retry %u times", + v_lport->port_id, v_lport->nport_id, + v_lport->retries); + + delay = (unsigned long)v_lport->ed_tov; + ret = queue_delayed_work(unf_work_queue, + &v_lport->retry_work, + (unsigned long)msecs_to_jiffies( + (unsigned int)delay)); + if (ret) { + atomic_inc(&v_lport->lport_ref_cnt); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, 
UNF_MAJOR, + "[info]Port(0x%x) queue work success and reference count is %d", + v_lport->port_id, + atomic_read(&v_lport->lport_ref_cnt)); + } + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + } else { + unf_lport_stat_ma(v_lport, UNF_EVENT_LPORT_REMOTE_TIMEOUT); + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) register operation timeout and do LOGO", + v_lport->port_id); + + /* Do L_Port LOGO */ + (void)unf_lport_enter_sns_logo(v_lport, NULL); + } + + unf_lport_ref_dec_to_destroy(v_lport); +} + +struct unf_lport_s *unf_cm_lookup_vport_by_vp_index(struct unf_lport_s *v_lport, + unsigned short v_vp_index) +{ + UNF_CHECK_VALID(0x1819, UNF_TRUE, v_lport, return NULL); + + if (v_vp_index == 0) + return v_lport; + + if (!v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_vp_index) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) function do look up vport by index is NULL", + v_lport->port_id); + + return NULL; + } + + return v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_vp_index( + v_lport, v_vp_index); +} + +struct unf_lport_s *unf_cm_lookup_vport_by_did(struct unf_lport_s *v_lport, + unsigned int v_did) +{ + UNF_CHECK_VALID(0x1821, UNF_TRUE, v_lport, return NULL); + + if (!v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_did) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) function do look up vport by D_ID is NULL", + v_lport->port_id); + + return NULL; + } + + return v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_did(v_lport, + v_did); +} + +struct unf_lport_s *unf_cm_lookup_vport_by_wwpn(struct unf_lport_s *v_lport, + unsigned long long v_wwpn) +{ + UNF_CHECK_VALID(0x1822, UNF_TRUE, v_lport, return NULL); + + if (!v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_wwpn) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) function do look up vport by WWPN is NULL", + v_lport->port_id); + + return NULL; + } + + return v_lport->lport_mgr_temp.pfn_unf_lookup_vport_by_wwpn(v_lport, + v_wwpn); +} + +void unf_cm_vport_remove(struct unf_lport_s *v_vport) +{ + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x1823, UNF_TRUE, v_vport, return); + lport = v_vport->root_lport; + UNF_CHECK_VALID(0x1824, UNF_TRUE, lport, return); + + if (!lport->lport_mgr_temp.pfn_unf_vport_remove) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) function do vport remove is NULL", + lport->port_id); + return; + } + + lport->lport_mgr_temp.pfn_unf_vport_remove(v_vport); +} diff --git a/drivers/scsi/huawei/hifc/unf_lport.h b/drivers/scsi/huawei/hifc/unf_lport.h new file mode 100644 index 000000000000..cb9105e12b51 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_lport.h @@ -0,0 +1,569 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef __UNF_LPORT_H +#define __UNF_LPORT_H +#include "unf_disc.h" +#include "unf_event.h" +#include "unf_common.h" + +#define UNF_PORT_TYPE_FC 0 +#define UNF_PORT_TYPE_DISC 1 +#define UNF_FW_UPDATE_PATH_LEN_MAX 255 +#define UNF_EXCHG_MGR_NUM (4) + +#define UNF_MAX_IO_RETURN_VALUE 0x12 +#define UNF_MAX_SCSI_CMD 0xFF + +enum unf_scsi_error_handle_type { + UNF_SCSI_ABORT_IO_TYPE = 0, + UNF_SCSI_DEVICE_RESET_TYPE, + UNF_SCSI_TARGET_RESET_TYPE, + UNF_SCSI_BUS_RESET_TYPE, + UNF_SCSI_HOST_RESET_TYPE, + UNF_SCSI_VIRTUAL_RESET_TYPE, + UNF_SCSI_ERROR_HANDLE_BUTT +}; + 
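+/* Teardown steps of an L_Port; the current step is recorded in the destroy_step field of struct unf_lport_s as each resource is released */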
+enum unf_lport_destroy_step_e { + UNF_LPORT_DESTROY_STEP_0_SET_REMOVING = 0, + UNF_LPORT_DESTROY_STEP_1_REPORT_PORT_OUT, + UNF_LPORT_DESTROY_STEP_2_CLOSE_ROUTE, + UNF_LPORT_DESTROY_STEP_3_DESTROY_EVENT_CENTER, + UNF_LPORT_DESTROY_STEP_4_DESTROY_EXCH_MGR, + UNF_LPORT_DESTROY_STEP_5_DESTROY_ESGL_POOL, + UNF_LPORT_DESTROY_STEP_6_DESTROY_DISC_MGR, + UNF_LPORT_DESTROY_STEP_7_DESTROY_XCHG_MGR_TMP, + UNF_LPORT_DESTROY_STEP_8_DESTROY_RPORT_MG_TMP, + UNF_LPORT_DESTROY_STEP_9_DESTROY_LPORT_MG_TMP, + UNF_LPORT_DESTROY_STEP_10_DESTROY_SCSI_TABLE, + UNF_LPORT_DESTROY_STEP_11_UNREG_TGT_HOST, + UNF_LPORT_DESTROY_STEP_12_UNREG_SCSI_HOST, + UNF_LPORT_DESTROY_STEP_13_DESTROY_LW_INTERFACE, + UNF_LPORT_DESTROY_STEP_BUTT +}; + +enum unf_lport_enhanced_feature_e { + /* Enhanced GFF feature: connect even if getting the GFF feature fails */ + UNF_LPORT_ENHANCED_FEATURE_ENHANCED_GFF = 0x0001, + /* Enhance IO balance */ + UNF_LPORT_ENHANCED_FEATURE_IO_TRANSFERLIST = 0x0002, + /* Enhance IO check */ + UNF_LPORT_ENHANCED_FEATURE_IO_CHECKPOINT = 0x0004, + /* Close FW ROUTE */ + UNF_LPORT_ENHANCED_FEATURE_CLOSE_FW_ROUTE = 0x0008, + /* read SFP information at the lowest frequency */ + UNF_LPORT_ENHANCED_FEATURE_READ_SFP_ONCE = 0x0010, + UNF_LPORT_ENHANCED_FEATURE_BUTT +}; + +enum unf_lport_login_state_e { + UNF_LPORT_ST_ONLINE = 0x2000, /* uninitialized */ + UNF_LPORT_ST_INITIAL, /* initialized and LinkDown */ + UNF_LPORT_ST_LINK_UP, /* initialized and Link UP */ + UNF_LPORT_ST_FLOGI_WAIT, /* waiting for FLOGI completion */ + UNF_LPORT_ST_PLOGI_WAIT, /* waiting for PLOGI completion */ + UNF_LPORT_ST_RNN_ID_WAIT, /* waiting for RNN_ID completion */ + UNF_LPORT_ST_RSNN_NN_WAIT, /* waiting for RSNN_NN completion */ + UNF_LPORT_ST_RSPN_ID_WAIT, /* waiting for RSPN_ID completion */ + UNF_LPORT_ST_RPN_ID_WAIT, /* waiting for RPN_ID completion */ + UNF_LPORT_ST_RFT_ID_WAIT, /* waiting for RFT_ID completion */ + UNF_LPORT_ST_RFF_ID_WAIT, /* waiting for RFF_ID completion */ + UNF_LPORT_ST_SCR_WAIT, /* waiting for SCR completion */ + UNF_LPORT_ST_READY, /* ready for use */ + UNF_LPORT_ST_LOGO, /* waiting for LOGO completion */ + UNF_LPORT_ST_RESET, /* being reset and will restart */ + UNF_LPORT_ST_OFFLINE, /* offline */ + UNF_LPORT_ST_BUTT +}; + +enum unf_lport_event_e { + UNF_EVENT_LPORT_NORMAL_ENTER = 0x8000, /* next state enter */ + UNF_EVENT_LPORT_ONLINE = 0x8001, /* LPort link up */ + UNF_EVENT_LPORT_LINK_UP = 0x8002, /* LPort link up */ + UNF_EVENT_LPORT_LINK_DOWN = 0x8003, /* LPort link down */ + UNF_EVENT_LPORT_OFFLINE = 0x8004, /* LPort being stopped */ + UNF_EVENT_LPORT_RESET = 0x8005, + UNF_EVENT_LPORT_REMOTE_ACC = 0x8006, /* next state enter */ + UNF_EVENT_LPORT_REMOTE_RJT = 0x8007, /* rport reject */ + UNF_EVENT_LPORT_REMOTE_TIMEOUT = 0x8008, /* rport time out */ + UNF_EVENT_LPORT_READY = 0x8009, + UNF_EVENT_LPORT_REMOTE_BUTT +}; + +struct unf_cm_disc_mg_template_s { + /* start input:L_Port,return:ok/fail */ + unsigned int (*pfn_unf_disc_start)(void *v_lport); + + /* stop input: L_Port,return:ok/fail */ + unsigned int (*pfn_unf_disc_stop)(void *v_lport); + + /* Callback after disc complete[with event:ok/fail]. 
*/ + void (*pfn_unf_disc_callback)(void *v_lport, unsigned int v_result); +}; + +struct unf_chip_manage_info_s { + struct list_head list_chip_thread_entry; + struct list_head list_head; + spinlock_t chip_event_list_lock; + struct task_struct *data_thread; + unsigned int list_num; + unsigned int slot_id; + unsigned char chip_id; + unsigned char rsv; + unsigned char sfp_9545_fault; /* 9545 fault */ + unsigned char sfp_power_fault; /* SFP power fault */ + atomic_t ref_cnt; + unsigned int b_thread_exit; + struct unf_chip_info_s chip_info; + atomic_t card_loop_test_flag; + spinlock_t card_loop_back_state_lock; + char update_path[UNF_FW_UPDATE_PATH_LEN_MAX]; +}; + +enum unf_timer_type_e { + UNF_TIMER_TYPE_INI_IO, + UNF_TIMER_TYPE_REQ_IO, + UNF_TIMER_TYPE_INI_RRQ, + UNF_TIMER_TYPE_SFS, + UNF_TIMER_TYPE_INI_ABTS +}; + +struct unf_cm_xchg_mgr_template_s { + /* Get new Xchg */ + /* input:L_Port,ini/tgt type,return:initialized Xchg */ + void *(*pfn_unf_xchg_get_free_and_init)(void *, unsigned int, + unsigned short); + + /* OXID,SID lookup Xchg */ + /* input: L_Port,OXID,SID,return:Xchg */ + void *(*pfn_unf_look_up_xchg_by_id)(void *, unsigned short, + unsigned int); + + /* input:L_Port,tag,return:Xchg */ + void *(*pfn_unf_look_up_xchg_by_tag)(void *, unsigned short); + + /* free Xchg */ + /* input:L_Port,Xchg,return:void */ + void (*pfn_unf_xchg_release)(void *, void *); + + /* Abort IO Xchg by SID/DID */ + /* input:L_Port,SID,DID,return:void */ + void (*pfn_unf_xchg_mgr_io_xchg_abort)(void *, void *, unsigned int, + unsigned int, unsigned int); + + /* Abort SFS Xchg by SID/DID */ + /* input:L_Port,SID,DID,return:void */ + void (*pfn_unf_xchg_mgr_sfs_xchg_abort)(void *, void *, + unsigned int, unsigned int); + + /* Clean Xchg by SID/DID */ + /* input:L_Port,SID,DID,return:void */ + void (*pfn_unf_xchg_mgr_xchg_clean)(void *, unsigned int, + unsigned int); + + /* Add Xchg timer */ + void (*pfn_unf_xchg_add_timer)(void *, unsigned long, + enum unf_timer_type_e); + + /* Cancel Xchg timer */ + void (*pfn_unf_xchg_cancel_timer)(void *); + + /* L_Port, Abort flag */ + void (*pfn_unf_xchg_abort_all_io)(void *, unsigned int, int); + + /* find Xchg by scsi Cmnd sn */ + void *(*pfn_unf_look_up_xchg_by_cmnd_sn)(void *, unsigned long long, + unsigned int); + /* input:L_Port,unsigned long long */ + void (*pfn_unf_xchg_abort_by_lun)(void *, void *, unsigned long long, + void *, int); + + void (*pfn_unf_xchg_abort_by_session)(void *, void *); + +}; + +struct unf_rport_pool_s { + unsigned int rport_pool_count; + void *rport_pool_add; + struct list_head list_rports_pool; + spinlock_t rport_free_pool_lock; + /* for synchronous reuse RPort POOL completion */ + struct completion *rport_pool_completion; + unsigned long *pul_rpi_bitmap; +}; + +struct unf_cm_lport_template_s { + /* Get VPort struct and init */ + /* input:pstLport,ini/tgt type,return:pstVport */ + void *(*pfn_unf_vport_get_free_and_init)(void *, unsigned int); + + /* For fast IO path */ + /* input: pstLport, VpIndex, return:pstVport */ + void *(*pfn_unf_lookup_vport_by_vp_index)(void *, unsigned short); + + /* input: pstLport, PortId,return:pstVport */ + void *(*pfn_unf_lookup_vport_by_port_id)(void *, unsigned int); + + /* input:pstLport, wwpn, return:pstVport */ + void *(*pfn_unf_lookup_vport_by_wwpn)(void *, unsigned long long); + + /* input:L_Port, DID, return:pstVport */ + void *(*pfn_unf_lookup_vport_by_did)(void *, unsigned int); + + /* input:L_Port,return:void */ + void (*pfn_unf_vport_remove)(void *); + +}; + +struct unf_vport_pool_s { + unsigned 
short vport_pool_count; + void *vport_pool_addr; + struct list_head list_vport_pool; + spinlock_t vport_pool_lock; + struct completion *vport_pool_completion; + unsigned short slab_next_index; /* Next free vport */ + unsigned short slab_total_sum; /* Total Vport num */ + struct unf_lport_s *vport_slab[0]; +}; + +struct unf_esgl_pool_s { + unsigned int esgl_pool_count; + void *esgl_pool_addr; + struct list_head list_esgl_pool; + spinlock_t esgl_pool_lock; + struct buf_describe_s esgl_buf_list; +}; + +/* little endium */ +struct unf_port_id_page_s { + struct list_head list_node_rscn; + unsigned char port_id_port; + unsigned char port_id_area; + unsigned char port_id_domain; + + unsigned char uc_addr_format : 2; + unsigned char uc_event_qualifier : 4; + unsigned char uc_reserved : 2; +}; + +struct unf_rscn_mg_s { + spinlock_t rscn_id_list_lock; + unsigned int free_rscn_count; + + /* free RSCN page list */ + struct list_head list_free_rscn_page; + + /* using RSCN page list */ + struct list_head list_using_rscn_page; + + /* All RSCN PAGE Address */ + void *rscn_pool_add; + struct unf_port_id_page_s *(*pfn_unf_get_free_rscn_node)( + void *v_rscn_mg); + void (*pfn_unf_release_rscn_node)(void *v_rscn_mg, void *v_rscn_node); +}; + +struct unf_disc_rport_mg_s { + void *disc_pool_add; + struct list_head list_disc_rports_pool; /* discovery DISC Rport pool */ + struct list_head list_disc_rport_busy; /* Busy discovery DiscRport */ +}; + +struct unf_disc_manage_info_s { + struct list_head list_head; + spinlock_t disc_event_list_lock; + atomic_t disc_contrl_size; + + unsigned int b_thread_exit; + struct task_struct *data_thread; + +}; + +struct unf_disc_s { + unsigned int retry_count; /* current retry counter */ + unsigned int max_retry_count; /* retry counter */ + unsigned int disc_flag; /* Disc flag :Loop Disc,Fabric Disc */ + + struct completion *disc_completion; + atomic_t disc_ref_cnt; + + struct list_head list_busy_rports; /* Busy RPort list */ + struct list_head list_delete_rports; /* Delete RPort list */ + struct list_head list_destroy_rports; + + spinlock_t rport_busy_pool_lock; + + struct unf_lport_s *lport; + enum unf_disc_state_e en_states; + struct delayed_work disc_work; + + /* Disc operation template */ + struct unf_cm_disc_mg_template_s unf_disc_temp; + + /* UNF_INIT_DISC/UNF_RSCN_DISC */ + unsigned int disc_option; + + /* RSCN list */ + struct unf_rscn_mg_s rscn_mgr; + struct unf_disc_rport_mg_s disc_rport_mgr; + struct unf_disc_manage_info_s disc_thread_info; + + unsigned long long last_disc_jiff; +}; + +enum unf_service_item_e { + UNF_SERVICE_ITEM_FLOGI = 0, + UNF_SERVICE_ITEM_PLOGI, + UNF_SERVICE_ITEM_PRLI, + UNF_SERVICE_ITEM_RSCN, + UNF_SERVICE_ITEM_ABTS, + UNF_SERVICE_ITEM_PDISC, + UNF_SERVICE_ITEM_ADISC, + UNF_SERVICE_ITEM_LOGO, + UNF_SERVICE_ITEM_SRR, + UNF_SERVICE_ITEM_RRQ, + UNF_SERVICE_ITEM_ECHO, + UNF_SERVICE_ITEM_RLS, + UNF_SERVICE_BUTT +}; + +/* Link service counter */ +struct unf_link_service_collect_s { + unsigned long long service_cnt[UNF_SERVICE_BUTT]; +}; + +struct unf_pcie_error_count_s { + unsigned int pcie_error_count[UNF_PCIE_BUTT]; +}; + +#define INVALID_WWPN 0 + +enum unf_device_scsi_state_e { + UNF_SCSI_ST_INIT = 0, + UNF_SCSI_ST_OFFLINE, + UNF_SCSI_ST_ONLINE, + UNF_SCSI_ST_DEAD, + UNF_SCSI_ST_BUTT +}; + +struct unf_wwpn_dfx_counter_info_s { + atomic64_t io_done_cnt[UNF_MAX_IO_RETURN_VALUE]; + atomic64_t scsi_cmd_cnt[UNF_MAX_SCSI_CMD]; + atomic64_t target_busy; + atomic64_t host_busy; + atomic_t error_handle[UNF_SCSI_ERROR_HANDLE_BUTT]; + atomic_t 
error_handle_result[UNF_SCSI_ERROR_HANDLE_BUTT]; + atomic_t device_alloc; + atomic_t device_destroy; +}; + +#define UNF_MAX_LUN_PER_TARGET 256 +struct unf_wwpn_rport_info_s { + unsigned long long wwpn; + struct unf_rport_s *rport; /* Rport which linkup */ + void *lport; /* Lport */ + unsigned int target_id; /* target_id distribute by scsi */ + unsigned int last_en_scis_state; + atomic_t en_scsi_state; + struct unf_wwpn_dfx_counter_info_s *dfx_counter; + struct delayed_work loss_tmo_work; + int b_need_scan; + struct list_head fc_lun_list; +}; + +struct unf_rport_scsi_id_image_s { + spinlock_t scsi_image_table_lock; + /* ScsiId Wwpn table */ + struct unf_wwpn_rport_info_s *wwn_rport_info_table; + unsigned int max_scsi_id; +}; + +enum unf_lport_dirty_flag_e { + UNF_LPORT_DIRTY_FLAG_NONE = 0, + UNF_LPORT_DIRTY_FLAG_XCHGMGR_DIRTY = 0x100, + UNF_LPORT_DIRTY_FLAG_RPORT_POOL_DIRTY = 0x200, + UNF_LPORT_DIRTY_FLAG_DISC_DIRTY = 0x400, + UNF_LPORT_DIRTY_FLAG_BUTT +}; + +typedef struct unf_rport_s *(*pfn_unf_rport_set_qualifier)( + struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport_by_nport_id, + struct unf_rport_s *v_rport_by_wwpn, + unsigned long long v_wwpn, + unsigned int v_sid); +typedef unsigned int (*pfn_unf_tmf_status_recovery)(void *v_rport, + void *v_xchg); + +enum unf_start_work_state_e { + UNF_START_WORK_STOP, + UNF_START_WORK_BEGIN, + UNF_START_WORK_COMPLETE +}; + +struct unf_ini_private_info_s { + unsigned int driver_type; /* Driver Type */ + void *lower; /* driver private pointer */ +}; + +struct unf_product_hosts_info_s { + void *p_tgt_host; + unf_scsi_host_s *p_scsi_host; + struct unf_ini_private_info_s drv_private_info; + unf_scsi_host_s scsi_host; + +}; + +struct unf_lport_s { + unsigned int port_type; /* Port Type: fc */ + atomic_t lport_ref_cnt; /* LPort reference counter */ + void *fc_port; /* hard adapter hba pointer */ + void *rport; /* Used for SCSI interface */ + void *vport; + + struct unf_product_hosts_info_s host_info; /* scsi host mg */ + struct unf_rport_scsi_id_image_s rport_scsi_table; + int b_port_removing; + + int b_port_dir_exchange; + + spinlock_t xchg_mgr_lock; + struct list_head list_xchg_mgr_head; + struct list_head list_dirty_xchg_mgr_head; + void *p_xchg_mgr[UNF_EXCHG_MGR_NUM]; + enum int_e b_priority; + struct list_head list_vports_head; /* Vport Mg */ + struct list_head list_intergrad_vports; /* Vport intergrad list */ + struct list_head list_destroy_vports; /* Vport destroy list */ + /* VPort entry, hook in list_vports_head */ + struct list_head entry_vport; + struct list_head entry_lport; /* LPort entry */ + spinlock_t lport_state_lock; /* UL Port Lock */ + struct unf_disc_s disc; /* Disc and rport Mg */ + /* rport pool,Vport share Lport pool */ + struct unf_rport_pool_s rport_pool; + struct unf_esgl_pool_s esgl_pool; /* external sgl pool */ + unsigned int port_id; /* Port Management ,0x11000 etc. 
*/ + enum unf_lport_login_state_e en_states; + unsigned int link_up; + unsigned int speed; + + unsigned long long node_name; + unsigned long long port_name; + unsigned long long fabric_node_name; + unsigned int nport_id; + unsigned int max_frame_size; + unsigned int ed_tov; + unsigned int ra_tov; + unsigned int rr_tov; + + unsigned int options; /* ini or tgt */ + unsigned int retries; + unsigned int max_retry_count; + + enum unf_act_topo_e en_act_topo; + enum int_e b_switch_state; /* 1---->ON,FALSE---->OFF */ + enum int_e b_bbscn_support; /* 1---->ON,FALSE---->OFF */ + + enum unf_start_work_state_e en_start_work_state; + + /* Xchg Mg operation template */ + struct unf_cm_xchg_mgr_template_s xchg_mgr_temp; + struct unf_cm_lport_template_s lport_mgr_temp; + struct unf_low_level_function_op_s low_level_func; + struct unf_event_mgr event_mgr; /* Disc and rport Mg */ + struct delayed_work retry_work; /* poll work or delay work */ + + struct workqueue_struct *link_event_wq; + struct workqueue_struct *xchg_wq; + + struct unf_err_code_s err_code_sum; /* Error code counter */ + struct unf_link_service_collect_s link_service_info; + struct unf_pcie_error_count_s pcie_error_cnt; + pfn_unf_rport_set_qualifier pfn_unf_qualify_rport; /* Qualify Rport */ + /* tmf marker recovery */ + pfn_unf_tmf_status_recovery pfn_unf_tmf_abnormal_recovery; + struct delayed_work route_timer_work; /* L_Port timer route */ + + unsigned short vp_index; /* Vport Index, Lport:0 */ + struct unf_vport_pool_s *vport_pool; /* Only for Lport */ + + void *root_lport; /* Point to physic Lport */ + struct completion *lport_free_completion; /* Free LPort Completion */ + +#define UNF_LPORT_NOP 1 +#define UNF_LPORT_NORMAL 0 + + atomic_t port_no_operater_flag; + + unsigned int enhanced_features; /* Enhanced Features */ + + unsigned int destroy_step; + unsigned int dirty_flag; + + struct unf_lport_sfp_info sfp_info; + struct unf_chip_manage_info_s *chip_info; + +#define UNF_LOOP_BACK_TESTING 1 +#define UNF_LOOP_BACK_TEST_END 0 + + unsigned char sfp_power_fault_count; + unsigned char sfp_9545_fault_count; + unsigned long long last_tx_fault_jif; /* SFP last tx fault jiffies */ + + /* Server card: UNF_FC_SERVER_BOARD_32_G(6)for 32G mode, + * UNF_FC_SERVER_BOARD_16_G(7)for 16G mode + */ + unsigned int card_type; + atomic_t scsi_session_add_success; + atomic_t scsi_session_add_failed; + atomic_t scsi_session_del_success; + atomic_t scsi_session_del_failed; + atomic_t add_start_work_failed; + atomic_t add_closing_work_failed; + atomic_t device_alloc; + atomic_t device_destroy; + atomic_t session_loss_tmo; + atomic_t alloc_scsi_id; + atomic_t resume_scsi_id; + atomic_t reuse_scsi_id; + atomic64_t last_exchg_mgr_idx; + atomic64_t exchg_index; + + unsigned int pcie_link_down_cnt; + int b_pcie_linkdown; + unsigned char fw_version[HIFC_VER_LEN]; + + atomic_t link_lose_tmo; + atomic_t err_code_obtain_freq; +}; + +void unf_lport_stat_ma(struct unf_lport_s *v_lport, + enum unf_lport_event_e v_event); +void unf_lport_error_recovery(struct unf_lport_s *v_lport); +void unf_set_lport_state(struct unf_lport_s *v_lport, + enum unf_lport_login_state_e v_states); +void unf_init_portparms(struct unf_lport_s *v_lport); +unsigned int unf_lport_enter_flogi(struct unf_lport_s *v_lport); +void unf_lport_enter_sns_plogi(struct unf_lport_s *v_lport); +unsigned int unf_init_disc_mgr(struct unf_lport_s *v_pst_lport); +unsigned int unf_init_lport_route(struct unf_lport_s *v_lport); +void unf_destroy_lport_route(struct unf_lport_s *v_lport); +void 
unf_reset_lport_params(struct unf_lport_s *v_lport); +void unf_cmmark_dirty_mem(struct unf_lport_s *v_lport, + enum unf_lport_dirty_flag_e v_etype); + +struct unf_lport_s *unf_cm_lookup_vport_by_vp_index(struct unf_lport_s *v_lport, + unsigned short v_vp_index); +struct unf_lport_s *unf_cm_lookup_vport_by_did(struct unf_lport_s *v_lport, + unsigned int v_did); +struct unf_lport_s *unf_cm_lookup_vport_by_wwpn(struct unf_lport_s *v_lport, + unsigned long long v_wwpn); +void unf_cm_vport_remove(struct unf_lport_s *v_vport); + +#endif + diff --git a/drivers/scsi/huawei/hifc/unf_npiv.c b/drivers/scsi/huawei/hifc/unf_npiv.c new file mode 100644 index 000000000000..1c3e3e99272e --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_npiv.c @@ -0,0 +1,1481 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#include "unf_log.h" +#include "unf_common.h" +#include "unf_lport.h" +#include "unf_rport.h" +#include "unf_exchg.h" +#include "unf_service.h" +#include "unf_portman.h" +#include "unf_rport.h" +#include "unf_io.h" +#include "unf_npiv.h" + +/* Note: + * The functions that allocate Vport resources are shared with the Lport, + * and the root Lport is passed as a parameter to them; the shared resources include: + * stEsglPool; + * event_mgr; + * stRportPool + * ExchMgr + */ + +#define UNF_DELETE_VPORT_MAX_WAIT_TIME_MS 60000 + +unsigned int unf_init_vport_pool(struct unf_lport_s *v_lport) +{ + unsigned int ret = RETURN_OK; + unsigned int i = 0; + unsigned short vport_cnt = 0; + struct unf_lport_s *vport = NULL; + struct unf_vport_pool_s *vport_pool; + unsigned int vport_pool_size = 0; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x1950, UNF_TRUE, v_lport, return RETURN_ERROR); + + UNF_TOU16_CHECK(vport_cnt, v_lport->low_level_func.support_max_npiv_num, + return RETURN_ERROR); + if (vport_cnt == 0) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Port(0x%x) does not support NPIV", + v_lport->port_id); + + return RETURN_OK; + } + + vport_pool_size = sizeof(struct unf_vport_pool_s) + + sizeof(struct unf_lport_s *) * vport_cnt; + v_lport->vport_pool = vmalloc(vport_pool_size); + if (!v_lport->vport_pool) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) cannot allocate vport pool", + v_lport->port_id); + + return RETURN_ERROR; + } + memset(v_lport->vport_pool, 0, vport_pool_size); + vport_pool = v_lport->vport_pool; + vport_pool->vport_pool_count = vport_cnt; + vport_pool->vport_pool_completion = NULL; + spin_lock_init(&vport_pool->vport_pool_lock); + INIT_LIST_HEAD(&vport_pool->list_vport_pool); + + vport_pool->vport_pool_addr = vmalloc( + (size_t)(vport_cnt * sizeof(struct unf_lport_s))); + if (!vport_pool->vport_pool_addr) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) cannot allocate vport pool address", + v_lport->port_id); + vfree(v_lport->vport_pool); + v_lport->vport_pool = NULL; + + return RETURN_ERROR; + } + + memset(vport_pool->vport_pool_addr, 0, vport_cnt * + sizeof(struct 
unf_lport_s)); + vport = (struct unf_lport_s *)vport_pool->vport_pool_addr; + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + for (i = 0; i < vport_cnt; i++) { + list_add_tail(&vport->entry_vport, + &vport_pool->list_vport_pool); + vport++; + } + + vport_pool->slab_next_index = 0; + vport_pool->slab_total_sum = vport_cnt; + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); + + return ret; +} + +void unf_free_vport_pool(struct unf_lport_s *v_lport) +{ + struct unf_vport_pool_s *vport_pool = NULL; + int wait = UNF_FALSE; + unsigned long flag = 0; + unsigned int remain = 0; + struct completion vport_pool_completion = + COMPLETION_INITIALIZER(vport_pool_completion); + + UNF_CHECK_VALID(0x1951, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x1952, UNF_TRUE, v_lport->vport_pool, return); + vport_pool = v_lport->vport_pool; + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); + + if (vport_pool->slab_total_sum != vport_pool->vport_pool_count) { + vport_pool->vport_pool_completion = &vport_pool_completion; + remain = vport_pool->slab_total_sum - + vport_pool->vport_pool_count; + wait = UNF_TRUE; + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); + + if (wait == UNF_TRUE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) begin to wait for vport pool completion(%ld) remain(%d)", + v_lport->port_id, jiffies, remain); + + wait_for_completion(vport_pool->vport_pool_completion); + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) wait for vport pool completion end(%ld)", + v_lport->port_id, jiffies); + spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); + vport_pool->vport_pool_completion = NULL; + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); + } + + if (v_lport->vport_pool->vport_pool_addr) { + vfree(v_lport->vport_pool->vport_pool_addr); + v_lport->vport_pool->vport_pool_addr = NULL; + } + vfree(v_lport->vport_pool); + v_lport->vport_pool = NULL; + + UNF_REFERNCE_VAR(remain); +} + +static inline struct unf_lport_s *unf_get_vport_by_slab_index( + struct unf_vport_pool_s *v_vport_pool, + unsigned short v_slab_index) +{ + UNF_CHECK_VALID(0x1953, UNF_TRUE, v_vport_pool, return NULL); + + return v_vport_pool->vport_slab[v_slab_index]; +} + +static inline void unf_vport_pool_slab_set( + struct unf_vport_pool_s *v_vport_pool, + unsigned short v_slab_index, + struct unf_lport_s *v_vport) +{ + UNF_CHECK_VALID(0x1954, UNF_TRUE, v_vport_pool, return); + + v_vport_pool->vport_slab[v_slab_index] = v_vport; +} + +unsigned int unf_alloc_vp_index(struct unf_vport_pool_s *v_vport_pool, + struct unf_lport_s *v_vport, + unsigned short v_vpid) +{ + unsigned short slab_index = 0; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x1955, UNF_TRUE, v_vport_pool, return RETURN_ERROR); + UNF_CHECK_VALID(0x1956, UNF_TRUE, v_vport, return RETURN_ERROR); + + spin_lock_irqsave(&v_vport_pool->vport_pool_lock, flags); + if (v_vpid == 0) { + slab_index = v_vport_pool->slab_next_index; + while (unf_get_vport_by_slab_index(v_vport_pool, slab_index)) { + slab_index = (slab_index + 1) % + v_vport_pool->slab_total_sum; + + if (slab_index == v_vport_pool->slab_next_index) { + spin_unlock_irqrestore( + &v_vport_pool->vport_pool_lock, flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_REG_ATT, UNF_WARN, + "[warn]VPort pool has no slab "); + + return RETURN_ERROR; + } + } + } else { + slab_index = v_vpid - 1; + if (unf_get_vport_by_slab_index(v_vport_pool, slab_index)) { + 
spin_unlock_irqrestore(&v_vport_pool->vport_pool_lock, + flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, + UNF_WARN, + "[warn]VPort Index(0x%x) is occupied", v_vpid); + + return RETURN_ERROR; + } + } + + unf_vport_pool_slab_set(v_vport_pool, slab_index, v_vport); + + v_vport_pool->slab_next_index = (slab_index + 1) % + v_vport_pool->slab_total_sum; + + spin_unlock_irqrestore(&v_vport_pool->vport_pool_lock, flags); + + spin_lock_irqsave(&v_vport->lport_state_lock, flags); + v_vport->vp_index = slab_index + 1; /* VpIndex=SlabIndex+1 */ + spin_unlock_irqrestore(&v_vport->lport_state_lock, flags); + + return RETURN_OK; +} + +void unf_free_vp_index(struct unf_vport_pool_s *v_vport_pool, + struct unf_lport_s *v_vport) +{ + unsigned long flags = 0; + + UNF_CHECK_VALID(0x1957, UNF_TRUE, v_vport_pool, return); + UNF_CHECK_VALID(0x1958, UNF_TRUE, v_vport, return); + + if ((v_vport->vp_index == 0) || + (v_vport->vp_index > v_vport_pool->slab_total_sum)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "Input vport index(0x%x) is beyond the normal range, min(0x1), max(0x%x).", + v_vport->vp_index, v_vport_pool->slab_total_sum); + return; + } + + spin_lock_irqsave(&v_vport_pool->vport_pool_lock, flags); + /* SlabIndex=VpIndex-1 */ + unf_vport_pool_slab_set(v_vport_pool, v_vport->vp_index - 1, NULL); + spin_unlock_irqrestore(&v_vport_pool->vport_pool_lock, flags); + + spin_lock_irqsave(&v_vport->lport_state_lock, flags); + v_vport->vp_index = INVALID_VALUE16; + spin_unlock_irqrestore(&v_vport->lport_state_lock, flags); +} + +struct unf_lport_s *unf_get_free_vport(struct unf_lport_s *v_lport) +{ + struct unf_lport_s *vport = NULL; + struct list_head *list_head = NULL; + struct unf_vport_pool_s *vport_pool; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x1959, 1, v_lport, return NULL); + UNF_CHECK_VALID(0x1960, UNF_TRUE, v_lport->vport_pool, return NULL); + + vport_pool = v_lport->vport_pool; + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); + if (!list_empty(&vport_pool->list_vport_pool)) { + list_head = (&vport_pool->list_vport_pool)->next; + list_del(list_head); + vport_pool->vport_pool_count--; + list_add_tail(list_head, &v_lport->list_vports_head); + vport = list_entry(list_head, struct unf_lport_s, entry_vport); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]LPort(0x%x)'s vport pool is empty", + v_lport->port_id); + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); + + return NULL; + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); + + return vport; +} + +void unf_vport_back_to_pool(void *v_vport) +{ + struct unf_lport_s *lport = NULL; + struct unf_lport_s *vport = NULL; + struct list_head *list = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x1961, UNF_TRUE, v_vport, return); + vport = v_vport; + lport = (struct unf_lport_s *)(vport->root_lport); + UNF_CHECK_VALID(0x1962, UNF_TRUE, lport, return); + UNF_CHECK_VALID(0x1963, UNF_TRUE, lport->vport_pool, return); + + unf_free_vp_index(lport->vport_pool, vport); + + spin_lock_irqsave(&lport->vport_pool->vport_pool_lock, flag); + + list = &vport->entry_vport; + list_del(list); + list_add_tail(list, &lport->vport_pool->list_vport_pool); + lport->vport_pool->vport_pool_count++; + + spin_unlock_irqrestore(&lport->vport_pool->vport_pool_lock, flag); +} + +void unf_init_vport_from_lport(struct unf_lport_s *v_vport, + struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x1964, UNF_TRUE, v_vport, return); + UNF_CHECK_VALID(0x1965, UNF_TRUE, v_lport, 
return); + + v_vport->port_type = v_lport->port_type; + v_vport->fc_port = v_lport->fc_port; + v_vport->en_act_topo = v_lport->en_act_topo; + v_vport->root_lport = v_lport; + v_vport->pfn_unf_qualify_rport = v_lport->pfn_unf_qualify_rport; + v_vport->link_event_wq = v_lport->link_event_wq; + v_vport->xchg_wq = v_lport->xchg_wq; + + memcpy(&v_vport->xchg_mgr_temp, &v_lport->xchg_mgr_temp, + sizeof(struct unf_cm_xchg_mgr_template_s)); + + memcpy(&v_vport->event_mgr, &v_lport->event_mgr, + sizeof(struct unf_event_mgr)); + + memset(&v_vport->lport_mgr_temp, 0, + sizeof(struct unf_cm_lport_template_s)); + + memcpy(&v_vport->low_level_func, &v_lport->low_level_func, + sizeof(struct unf_low_level_function_op_s)); +} + +void unf_check_vport_pool_status(struct unf_lport_s *v_lport) +{ + struct unf_vport_pool_s *vport_pool = NULL; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x1968, UNF_TRUE, v_lport, return); + vport_pool = v_lport->vport_pool; + UNF_CHECK_VALID(0x1969, UNF_TRUE, vport_pool, return); + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + + if ((vport_pool->vport_pool_completion) && + (vport_pool->slab_total_sum == vport_pool->vport_pool_count)) + complete(vport_pool->vport_pool_completion); + + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); +} + +void unf_vport_fabric_logo(struct unf_lport_s *v_vport) +{ + struct unf_rport_s *rport = NULL; + + rport = unf_get_rport_by_nport_id(v_vport, UNF_FC_FID_FLOGI); + UNF_CHECK_VALID(0x1970, UNF_TRUE, rport, return); + (void)unf_send_logo(v_vport, rport); +} + +void unf_vport_deinit(void *v_vport) +{ + struct unf_lport_s *vport = NULL; + + UNF_CHECK_VALID(0x1971, UNF_TRUE, v_vport, return); + vport = (struct unf_lport_s *)v_vport; + + unf_unregister_scsi_host(vport); + + unf_disc_mgr_destroy(vport); + + unf_release_xchg_mgr_temp(vport); + + unf_release_lport_mgr_temp(vport); + + unf_destroy_scsi_id_table(vport); + + unf_lport_release_lw_fun_op(vport); + vport->fc_port = NULL; + vport->vport = NULL; + + if (vport->lport_free_completion) { + complete(vport->lport_free_completion); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]VPort(0x%x) point(0x%p) completion free function is NULL", + vport->port_id, vport); + dump_stack(); + } +} + +void unf_vport_ref_dec(struct unf_lport_s *v_vport) +{ + UNF_CHECK_VALID(0x1972, UNF_TRUE, v_vport, return); + + if (atomic_dec_and_test(&v_vport->lport_ref_cnt)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]VPort(0x%x) point(0x%p) reference count is 0 and freevport", + v_vport->port_id, v_vport); + + unf_vport_deinit(v_vport); + } +} + +unsigned int unf_vport_init(void *v_vport) +{ + struct unf_lport_s *vport = NULL; + + UNF_CHECK_VALID(0x1974, UNF_TRUE, v_vport, return RETURN_ERROR); + vport = (struct unf_lport_s *)v_vport; + + vport->options = UNF_PORT_MODE_INI; + vport->nport_id = 0; + + if (unf_init_scsi_id_table(vport) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Vport(0x%x) can not initialize SCSI ID table", + vport->port_id); + + return RETURN_ERROR; + } + + if (unf_init_disc_mgr(vport) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Vport(0x%x) can not initialize discover manager", + vport->port_id); + unf_destroy_scsi_id_table(vport); + + return RETURN_ERROR; + } + + if (unf_register_scsi_host(vport) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Vport(0x%x) vport can not register SCSI host", + 
vport->port_id); + unf_disc_mgr_destroy(vport); + unf_destroy_scsi_id_table(vport); + + return RETURN_ERROR; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "[event]Vport(0x%x) Create succeed with wwpn(0x%llx)", + vport->port_id, vport->port_name); + + return RETURN_OK; +} + +void unf_vport_remove(void *v_vport) +{ + struct unf_lport_s *vport = NULL; + struct unf_lport_s *lport = NULL; + struct completion vport_free_completion = + COMPLETION_INITIALIZER(vport_free_completion); + + UNF_CHECK_VALID(0x1975, UNF_TRUE, v_vport, return); + vport = (struct unf_lport_s *)v_vport; + lport = (struct unf_lport_s *)(vport->root_lport); + vport->lport_free_completion = &vport_free_completion; + + unf_set_lport_removing(vport); + + unf_vport_ref_dec(vport); + + wait_for_completion(vport->lport_free_completion); + unf_vport_back_to_pool(vport); + + unf_check_vport_pool_status(lport); +} + +void *unf_lookup_vport_by_vp_index(void *v_lport, unsigned short v_vp_index) +{ + struct unf_lport_s *lport = NULL; + struct unf_vport_pool_s *vport_pool = NULL; + struct unf_lport_s *vport = NULL; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x1976, UNF_TRUE, v_lport, return NULL); + + lport = (struct unf_lport_s *)v_lport; + + vport_pool = lport->vport_pool; + if (unlikely(!vport_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) vport pool is NULL", + lport->port_id); + + return NULL; + } + + if ((v_vp_index == 0) || (v_vp_index > vport_pool->slab_total_sum)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) input vport index(0x%x) is beyond the normal range(0x1~0x%x)", + lport->port_id, v_vp_index, + vport_pool->slab_total_sum); + + return NULL; + } + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + /* SlabIndex=VpIndex-1 */ + vport = unf_get_vport_by_slab_index(vport_pool, v_vp_index - 1); + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); + + return (void *)vport; +} + +void *unf_lookup_vport_by_port_id(void *v_lport, unsigned int v_port_id) +{ + struct unf_lport_s *lport = NULL; + struct unf_vport_pool_s *vport_pool = NULL; + struct unf_lport_s *vport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x1977, UNF_TRUE, v_lport, return NULL); + + lport = (struct unf_lport_s *)v_lport; + vport_pool = lport->vport_pool; + if (unlikely(!vport_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) vport pool is NULL", + lport->port_id); + + return NULL; + } + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); + list_for_each_safe(node, next_node, &lport->list_vports_head) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + if (vport->port_id == v_port_id) { + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, + flag); + return vport; + } + } + + list_for_each_safe(node, next_node, &lport->list_intergrad_vports) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + if (vport->port_id == v_port_id) { + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, + flag); + return vport; + } + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) has no vport ID(0x%x).", + lport->port_id, v_port_id); + return NULL; +} + +void *unf_lookup_vport_by_did(void *v_lport, unsigned int v_did) +{ + struct unf_lport_s *lport = NULL; + struct unf_vport_pool_s *vport_pool = NULL; + struct 
unf_lport_s *vport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x1978, UNF_TRUE, v_lport, return NULL); + + lport = (struct unf_lport_s *)v_lport; + vport_pool = lport->vport_pool; + if (unlikely(!vport_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) vport pool is NULL", + lport->port_id); + + return NULL; + } + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); + list_for_each_safe(node, next_node, &lport->list_vports_head) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + if (vport->nport_id == v_did) { + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, + flag); + + return vport; + } + } + + list_for_each_safe(node, next_node, &lport->list_intergrad_vports) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + if (vport->nport_id == v_did) { + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, + flag); + + return vport; + } + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) has no vport Nport ID(0x%x)", + lport->port_id, v_did); + return NULL; +} + +void *unf_lookup_vport_by_wwpn(void *v_lport, unsigned long long v_wwpn) +{ + struct unf_lport_s *lport = NULL; + struct unf_vport_pool_s *vport_pool = NULL; + struct unf_lport_s *vport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x1979, UNF_TRUE, v_lport, return NULL); + + lport = (struct unf_lport_s *)v_lport; + vport_pool = lport->vport_pool; + if (unlikely(!vport_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) vport pool is NULL", + lport->port_id); + + return NULL; + } + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); + list_for_each_safe(node, next_node, &lport->list_vports_head) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + if (vport->port_name == v_wwpn) { + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, + flag); + + return vport; + } + } + + list_for_each_safe(node, next_node, &lport->list_intergrad_vports) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + if (vport->port_name == v_wwpn) { + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, + flag); + + return vport; + } + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) has no vport WWPN(0x%llx)", + lport->port_id, v_wwpn); + + return NULL; +} + +struct unf_lport_s *unf_alloc_vport(struct unf_lport_s *lport, + unsigned long long v_wwpn) +{ + struct unf_lport_s *vport = NULL; + + vport = unf_cm_lookup_vport_by_wwpn(lport, v_wwpn); + if (vport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Port(0x%x) has find vport with wwpn(0x%llx), can't create again", + lport->port_id, v_wwpn); + + return NULL; + } + + vport = unf_get_free_vport(lport); + if (!vport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Can not get free vport from pool"); + + return NULL; + } + vport->root_lport = lport; + vport->port_name = v_wwpn; + + unf_init_portparms(vport); + unf_init_vport_from_lport(vport, lport); + + if (unf_alloc_vp_index(lport->vport_pool, vport, 0) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Vport can not allocate vport index"); + unf_vport_back_to_pool(vport); + + return 
NULL; + } + vport->port_id = (((unsigned int)vport->vp_index) << + PORTID_VPINDEX_SHIT) | lport->port_id; + + return vport; +} + +unsigned int unf_npiv_conf(unsigned int v_port_id, unsigned long long v_wwpn) +{ +#define VPORT_WWN_MASK 0xff00ffffffffffff +#define VPORT_WWN_SHIFT 48 + + struct fc_vport_identifiers vid = { 0 }; + struct fc_vport *fc_port = NULL; + struct Scsi_Host *shost = NULL; + struct unf_lport_s *lport = NULL; + struct unf_lport_s *vport = NULL; + unsigned short vport_id = 0; + + lport = unf_find_lport_by_port_id(v_port_id); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Cannot find LPort by (0x%x).", v_port_id); + + return RETURN_ERROR; + } + + vport = unf_cm_lookup_vport_by_wwpn(lport, v_wwpn); + if (vport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Port(0x%x) has found vport with wwpn(0x%llx), can't create again", + lport->port_id, v_wwpn); + + return RETURN_ERROR; + } + + vport = unf_get_free_vport(lport); + if (!vport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Can not get free vport from pool"); + + return RETURN_ERROR; + } + + unf_init_portparms(vport); + unf_init_vport_from_lport(vport, lport); + + if ((lport->port_name & VPORT_WWN_MASK) == (v_wwpn & VPORT_WWN_MASK)) { + vport_id = (v_wwpn & ~VPORT_WWN_MASK) >> VPORT_WWN_SHIFT; + if (vport_id == 0) { + vport_id = (lport->port_name & ~VPORT_WWN_MASK) >> + VPORT_WWN_SHIFT; + } + } + + if (unf_alloc_vp_index(lport->vport_pool, vport, vport_id) != + RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Vport can not allocate vport index"); + unf_vport_back_to_pool(vport); + + return RETURN_ERROR; + } + + vport->port_id = (((unsigned int)vport->vp_index) << + PORTID_VPINDEX_SHIT) | lport->port_id; + + vid.roles = FC_PORT_ROLE_FCP_INITIATOR; + vid.vport_type = FC_PORTTYPE_NPIV; + vid.disable = false; + vid.node_name = lport->node_name; + + if (v_wwpn != 0) { + vid.port_name = v_wwpn; + } else { + if ((lport->port_name & ~VPORT_WWN_MASK) >> VPORT_WWN_SHIFT != + vport->vp_index) + vid.port_name = + (lport->port_name & VPORT_WWN_MASK) | + (((unsigned long long)vport->vp_index) << + VPORT_WWN_SHIFT); + else + vid.port_name = (lport->port_name & VPORT_WWN_MASK); + } + + vport->port_name = vid.port_name; + + shost = lport->host_info.p_scsi_host; + + fc_port = fc_vport_create(shost, 0, &vid); + if (!fc_port) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) failed to create vport with wwpn(0x%llx)", + lport->port_id, vid.port_name); + + unf_vport_back_to_pool(vport); + + return RETURN_ERROR; + } + + return RETURN_OK; +} + +struct unf_lport_s *unf_create_vport(struct unf_lport_s *v_lport, + struct vport_config_s *v_vport_config) +{ + unsigned int ret = RETURN_OK; + struct unf_lport_s *lport = NULL; + struct unf_lport_s *vport = NULL; + enum unf_act_topo_e lport_topo = UNF_ACT_TOP_UNKNOWN; + enum unf_lport_login_state_e lport_state = UNF_LPORT_ST_ONLINE; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x1983, UNF_TRUE, v_lport, return NULL); + UNF_CHECK_VALID(0x1983, UNF_TRUE, v_vport_config, return NULL); + + if (v_vport_config->port_mode != FC_PORT_ROLE_FCP_INITIATOR) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Only support INITIATOR port mode(0x%x)", + v_vport_config->port_mode); + + return NULL; + } + lport = v_lport; + + if (lport != lport->root_lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) not root 
port return", + lport->port_id); + + return NULL; + } + + vport = unf_cm_lookup_vport_by_wwpn(lport, v_vport_config->port_name); + if (!vport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Port(0x%x) can not find vport with wwpn(0x%llx)", + lport->port_id, v_vport_config->port_name); + + return NULL; + } + + ret = unf_vport_init(vport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]VPort(0x%x) can not initialze vport", + vport->port_id); + + return NULL; + } + + spin_lock_irqsave(&lport->lport_state_lock, flag); + lport_topo = lport->en_act_topo; + lport_state = lport->en_states; + v_vport_config->node_name = lport->node_name; + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + vport->port_name = v_vport_config->port_name; + vport->node_name = v_vport_config->node_name; + vport->nport_id = 0; + + /* only fabric topo support NPIV */ + if ((lport_topo == UNF_ACT_TOP_P2P_FABRIC) && + /* after receive flogi acc */ + (lport_state >= UNF_LPORT_ST_PLOGI_WAIT) && + (lport_state <= UNF_LPORT_ST_READY)) { + vport->link_up = lport->link_up; + (void)unf_lport_login(vport, lport_topo); + } + + return vport; +} + +unsigned int unf_drop_vport(struct unf_lport_s *v_vport) +{ + unsigned int ret = RETURN_ERROR; + struct fc_vport *vport = NULL; + + UNF_CHECK_VALID(0x1985, UNF_TRUE, v_vport, return RETURN_ERROR); + + vport = v_vport->vport; + if (!vport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]VPort(0x%x) find vport in scsi is NULL", + v_vport->port_id); + + return ret; + } + + ret = fc_vport_terminate(vport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]VPort(0x%x) terminate vport(%p) in scsi failed", + v_vport->port_id, vport); + + return ret; + } + return ret; +} + +unsigned int unf_delete_vport(unsigned int v_port_id, unsigned int v_vp_index) +{ + struct unf_lport_s *lport = NULL; + unsigned short vp_index = 0; + struct unf_lport_s *vport = NULL; + + lport = unf_find_lport_by_port_id(v_port_id); + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) can not be found by portid", + v_port_id); + + return RETURN_ERROR; + } + + if (atomic_read(&lport->port_no_operater_flag) == UNF_LPORT_NOP) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) is in NOP, destroy all vports function will be called", + lport->port_id); + + return RETURN_OK; + } + + UNF_TOU16_CHECK(vp_index, v_vp_index, return RETURN_ERROR); + vport = unf_cm_lookup_vport_by_vp_index(lport, vp_index); + if (!vport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Can not lookup VPort by VPort index(0x%x)", + vp_index); + + return RETURN_ERROR; + } + + return unf_drop_vport(vport); +} + +void unf_vport_abort_all_sfs_exch(struct unf_lport_s *vport) +{ + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct list_head *xchg_node = NULL; + struct list_head *next_xchg_node = NULL; + struct unf_xchg_s *exch = NULL; + unsigned long pool_lock_flags = 0; + unsigned long exch_lock_flags = 0; + unsigned int i; + + UNF_CHECK_VALID(0x1985, UNF_TRUE, vport, return); + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + hot_pool = unf_get_hot_pool_by_lport( + (struct unf_lport_s *)(vport->root_lport), i); + if (unlikely(!hot_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) hot pool is NULL", + ((struct unf_lport_s *) + (vport->root_lport))->port_id); + continue; + } + 
+ spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + list_for_each_safe(xchg_node, next_xchg_node, + &hot_pool->sfs_busylist) { + exch = list_entry(xchg_node, struct unf_xchg_s, + list_xchg_entry); + spin_lock_irqsave(&exch->xchg_state_lock, + exch_lock_flags); + if (vport == exch->lport && + (atomic_read(&exch->ref_cnt) > 0)) { + exch->io_state |= TGT_IO_STATE_ABORT; + spin_unlock_irqrestore(&exch->xchg_state_lock, + exch_lock_flags); + unf_disc_ctrl_size_inc(vport, exch->cmnd_code); + /* Transfer exch to destroy chain */ + list_del(xchg_node); + list_add_tail(xchg_node, + &hot_pool->list_destroy_xchg); + + } else { + spin_unlock_irqrestore(&exch->xchg_state_lock, + exch_lock_flags); + } + } + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + } +} + +void unf_vport_abort_ini_io_exch(struct unf_lport_s *vport) +{ + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct list_head *xchg_node = NULL; + struct list_head *next_xchg_node = NULL; + struct unf_xchg_s *exch = NULL; + unsigned long pool_lock_flags = 0; + unsigned long exch_lock_flags = 0; + unsigned int i; + + UNF_CHECK_VALID(0x1986, UNF_TRUE, vport, return); + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + hot_pool = unf_get_hot_pool_by_lport( + (struct unf_lport_s *)(vport->root_lport), i); + if (unlikely(!hot_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) MgrIdex %d hot pool is NULL", + ((struct unf_lport_s *) + (vport->root_lport))->port_id, i); + continue; + } + + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + list_for_each_safe(xchg_node, next_xchg_node, + &hot_pool->ini_busylist) { + exch = list_entry(xchg_node, struct unf_xchg_s, + list_xchg_entry); + + if (vport == exch->lport && + atomic_read(&exch->ref_cnt) > 0) { + /* Transfer exch to destroy chain */ + list_del(xchg_node); + list_add_tail(xchg_node, + &hot_pool->list_destroy_xchg); + + spin_lock_irqsave(&exch->xchg_state_lock, + exch_lock_flags); + exch->io_state |= INI_IO_STATE_DRABORT; + spin_unlock_irqrestore(&exch->xchg_state_lock, + exch_lock_flags); + } + } + + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + } +} + +void unf_vport_abort_all_exch(struct unf_lport_s *vport) +{ + UNF_CHECK_VALID(0x1988, UNF_TRUE, vport, return); + + unf_vport_abort_all_sfs_exch(vport); + + unf_vport_abort_ini_io_exch(vport); +} + +unsigned int unf_vport_wait_all_exch_removed(struct unf_lport_s *vport) +{ + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct list_head *xchg_node = NULL; + struct list_head *next_xchg_node = NULL; + struct unf_xchg_s *exch = NULL; + unsigned int vport_uses = 0; + unsigned long flags = 0; + unsigned long long cur_jif = jiffies; + unsigned int i = 0; + + UNF_CHECK_VALID(0x1989, UNF_TRUE, vport, return RETURN_ERROR); + + while (1) { + vport_uses = 0; + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + hot_pool = unf_get_hot_pool_by_lport( + (struct unf_lport_s *) + (vport->root_lport), i); + if (unlikely(!hot_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, + UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) hot Pool is NULL", + ((struct unf_lport_s *) + (vport->root_lport))->port_id); + + continue; + } + + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags); + list_for_each_safe(xchg_node, next_xchg_node, + &hot_pool->list_destroy_xchg) { + exch = list_entry(xchg_node, struct unf_xchg_s, + list_xchg_entry); + + if (vport != exch->lport) + continue; + + vport_uses++; + + if (jiffies - cur_jif >= + 
msecs_to_jiffies(UNF_DELETE_VPORT_MAX_WAIT_TIME_MS)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_NORMAL, UNF_ERR, + "[error]VPort(0x%x) Abort Exch(0x%p) Type(0x%x) OxRxid(0x%x 0x%x), sid did(0x%x 0x%x) SeqId(0x%x) IOState(0x%x) Ref(0x%x)", + vport->port_id, exch, + (unsigned int)exch->xchg_type, + (unsigned int)exch->ox_id, + (unsigned int)exch->rx_id, + (unsigned int)exch->sid, + (unsigned int)exch->did, + (unsigned int)exch->seq_id, + (unsigned int)exch->io_state, + atomic_read(&exch->ref_cnt)); + } + } + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, + flags); + } + + if (vport_uses == 0) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]VPort(0x%x) has removed all exchanges it used", + vport->port_id); + break; + } + + if (jiffies - cur_jif >= msecs_to_jiffies(UNF_DELETE_VPORT_MAX_WAIT_TIME_MS)) + return RETURN_ERROR; + + msleep(1000); + } + + return RETURN_OK; +} + +unsigned int unf_vport_wait_rports_removed(struct unf_lport_s *vport) +{ + struct unf_disc_s *disc = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned int vport_uses = 0; + unsigned long flags = 0; + unsigned long long cur_jif = jiffies; + struct unf_rport_s *rport = NULL; + + UNF_CHECK_VALID(0x1990, UNF_TRUE, vport, return RETURN_ERROR); + disc = &vport->disc; + + while (1) { + vport_uses = 0; + spin_lock_irqsave(&disc->rport_busy_pool_lock, flags); + list_for_each_safe(node, next_node, &disc->list_delete_rports) { + rport = list_entry(node, struct unf_rport_s, + entry_rport); + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, + UNF_MAJOR, + "[info]Vport(0x%x) Rport(0x%x) point(%p) is in Delete", + vport->port_id, rport->nport_id, rport); + vport_uses++; + } + list_for_each_safe(node, next_node, + &disc->list_destroy_rports) { + rport = list_entry(node, struct unf_rport_s, + entry_rport); + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, + UNF_MAJOR, + "[info]Vport(0x%x) Rport(0x%x) point(%p) is in Destroy", + vport->port_id, rport->nport_id, rport); + vport_uses++; + } + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flags); + + if (vport_uses == 0) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]VPort(0x%x) has removed all RPorts it used", + vport->port_id); + break; + } + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Vport(0x%x) has %d RPorts not removed wait timeout(30s)", + vport->port_id, vport_uses); + + if (jiffies - cur_jif >= + msecs_to_jiffies(UNF_DELETE_VPORT_MAX_WAIT_TIME_MS)) + return RETURN_ERROR; + + msleep(5000); + } + + UNF_REFERNCE_VAR(rport); + + return RETURN_OK; +} + +unsigned int unf_destroy_one_vport(struct unf_lport_s *vport) +{ + unsigned int ret = RETURN_ERROR; + struct unf_lport_s *root_port = NULL; + + UNF_CHECK_VALID(0x1992, UNF_TRUE, vport, return RETURN_ERROR); + + root_port = (struct unf_lport_s *)vport->root_lport; + + unf_vport_fabric_logo(vport); + + /* 1 set NOP */ + atomic_set(&vport->port_no_operater_flag, UNF_LPORT_NOP); + vport->b_port_removing = UNF_TRUE; + + /* 2 report linkdown to scsi and delele rpot */ + unf_link_down_one_vport(vport); + + /* 3 set abort for exchange */ + unf_vport_abort_all_exch(vport); + + /* 4 wait exch return freepool */ + if (!root_port->b_port_dir_exchange) { + ret = unf_vport_wait_all_exch_removed(vport); + if (ret != RETURN_OK) { + if ((root_port->b_port_removing) != UNF_TRUE) { + vport->b_port_removing = UNF_FALSE; + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, + UNF_ERR, + "[err]VPort(0x%x) can not wait Exchange return freepool", + 
vport->port_id); + + return RETURN_ERROR; + } + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_NORMAL, UNF_WARN, + "[warn]Port(0x%x) is removing, there is dirty exchange, continue", + root_port->port_id); + + root_port->b_port_dir_exchange = UNF_TRUE; + } + } + + /* wait rport return rportpool */ + ret = unf_vport_wait_rports_removed(vport); + if (ret != RETURN_OK) { + vport->b_port_removing = UNF_FALSE; + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[err]VPort(0x%x) can not wait Rport return freepool", + vport->port_id); + + return RETURN_ERROR; + } + + unf_cm_vport_remove(vport); + + return RETURN_OK; +} + +void unf_link_down_one_vport(struct unf_lport_s *v_vport) +{ + unsigned long flag = 0; + struct unf_lport_s *root_lport = NULL; + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_KEVENT, + "[info]VPort(0x%x) linkdown", v_vport->port_id); + + spin_lock_irqsave(&v_vport->lport_state_lock, flag); + v_vport->link_up = UNF_PORT_LINK_DOWN; + v_vport->nport_id = 0; /* set nportid 0 before send fdisc again */ + unf_lport_stat_ma(v_vport, UNF_EVENT_LPORT_LINK_DOWN); + spin_unlock_irqrestore(&v_vport->lport_state_lock, flag); + + root_lport = (struct unf_lport_s *)v_vport->root_lport; + + unf_flush_disc_event(&root_lport->disc, v_vport); + + unf_clean_linkdown_rport(v_vport); +} + +void unf_linkdown_all_vports(void *v_lport) +{ + struct unf_lport_s *lport = NULL; + struct unf_vport_pool_s *vport_pool = NULL; + struct unf_lport_s *vport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x1993, UNF_TRUE, v_lport, return); + + lport = (struct unf_lport_s *)v_lport; + vport_pool = lport->vport_pool; + if (unlikely(!vport_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) VPort pool is NULL", + lport->port_id); + + return; + } + + /* Transfer to the transition chain */ + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + list_for_each_safe(node, next_node, &lport->list_vports_head) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + list_del_init(&vport->entry_vport); + list_add_tail(&vport->entry_vport, + &lport->list_intergrad_vports); + (void)unf_lport_refinc(vport); + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + while (!list_empty(&lport->list_intergrad_vports)) { + node = (&lport->list_intergrad_vports)->next; + vport = list_entry(node, struct unf_lport_s, entry_vport); + + list_del_init(&vport->entry_vport); + list_add_tail(&vport->entry_vport, &lport->list_vports_head); + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); + + unf_link_down_one_vport(vport); + + unf_vport_ref_dec(vport); + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); +} + +int unf_process_vports_linkup(void *v_arg_in, void *v_arg_out) +{ + struct unf_vport_pool_s *vport_pool = NULL; + struct unf_lport_s *lport = NULL; + struct unf_lport_s *vport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flags = 0; + int ret = RETURN_OK; + + UNF_REFERNCE_VAR(v_arg_out); + UNF_CHECK_VALID(0x1994, UNF_TRUE, v_arg_in, return RETURN_ERROR); + + lport = (struct unf_lport_s *)v_arg_in; + + if (atomic_read(&lport->port_no_operater_flag) == UNF_LPORT_NOP) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) is NOP don't continue", + lport->port_id); + + 
return RETURN_OK; + } + + if (lport->link_up != UNF_PORT_LINK_UP) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) is not linkup don't continue.", + lport->port_id); + + return RETURN_OK; + } + + vport_pool = lport->vport_pool; + if (unlikely(!vport_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) VPort pool is NULL.", + lport->port_id); + + return RETURN_OK; + } + + /* Transfer to the transition chain */ + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + list_for_each_safe(node, next_node, &lport->list_vports_head) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + list_del_init(&vport->entry_vport); + list_add_tail(&vport->entry_vport, + &lport->list_intergrad_vports); + (void)unf_lport_refinc(vport); + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + while (!list_empty(&lport->list_intergrad_vports)) { + node = (&lport->list_intergrad_vports)->next; + vport = list_entry(node, struct unf_lport_s, entry_vport); + + list_del_init(&vport->entry_vport); + list_add_tail(&vport->entry_vport, &lport->list_vports_head); + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); + + if (atomic_read(&vport->port_no_operater_flag) == + UNF_LPORT_NOP) { + unf_vport_ref_dec(vport); + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + continue; + } + + if ((lport->link_up == UNF_PORT_LINK_UP) && + (lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC)) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]Vport(0x%x) begin login", + vport->port_id); + + vport->link_up = UNF_PORT_LINK_UP; + (void)unf_lport_login(vport, lport->en_act_topo); + + msleep(100); + } else { + unf_link_down_one_vport(vport); + + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Vport(0x%x) login failed because root port linkdown", + vport->port_id); + } + + unf_vport_ref_dec(vport); + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); + + return ret; +} + +void unf_linkup_all_vports(struct unf_lport_s *v_lport) +{ + struct unf_cm_event_report *event = NULL; + + UNF_CHECK_VALID(0x1996, UNF_TRUE, v_lport, return); + + if (unlikely((!v_lport->event_mgr.pfn_unf_get_free_event) || + (!v_lport->event_mgr.pfn_unf_post_event) || + (!v_lport->event_mgr.pfn_unf_release_event))) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) Event fun is NULL", + v_lport->port_id); + return; + } + + event = v_lport->event_mgr.pfn_unf_get_free_event((void *)v_lport); + UNF_CHECK_VALID(0x1997, UNF_TRUE, event, return); + + event->lport = v_lport; + event->event_asy_flag = UNF_EVENT_ASYN; + event->pfn_unf_event_task = unf_process_vports_linkup; + event->para_in = (void *)v_lport; + + v_lport->event_mgr.pfn_unf_post_event(v_lport, event); +} + +void unf_destroy_all_vports(struct unf_lport_s *v_lport) +{ + struct unf_vport_pool_s *vport_pool = NULL; + struct unf_lport_s *lport = NULL; + struct unf_lport_s *vport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flags = 0; + + lport = v_lport; + UNF_CHECK_VALID(0x1998, UNF_TRUE, lport, return); + + vport_pool = lport->vport_pool; + if (unlikely(!vport_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Lport(0x%x) VPort pool is NULL", + lport->port_id); + + return; + } + + /* Transfer to the transition chain */ + 
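 + /* Collect vports from the active and in-transition lists onto the destroy list, then drop them one by one, releasing the pool lock around each unf_drop_vport() call. */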
spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + list_for_each_safe(node, next_node, &lport->list_vports_head) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + list_del_init(&vport->entry_vport); + list_add_tail(&vport->entry_vport, &lport->list_destroy_vports); + } + + list_for_each_safe(node, next_node, &lport->list_intergrad_vports) { + vport = list_entry(node, struct unf_lport_s, entry_vport); + list_del_init(&vport->entry_vport); + list_add_tail(&vport->entry_vport, + &lport->list_destroy_vports); + atomic_dec(&vport->lport_ref_cnt); + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + while (!list_empty(&lport->list_destroy_vports)) { + node = (&lport->list_destroy_vports)->next; + vport = list_entry(node, struct unf_lport_s, entry_vport); + + list_del_init(&vport->entry_vport); + list_add_tail(&vport->entry_vport, &lport->list_vports_head); + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]VPort(0x%x) Destroy begin", + vport->port_id); + unf_drop_vport(vport); + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_KEVENT, + "[info]VPort(0x%x) Destroy end", + vport->port_id); + + spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); + } + spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); +} + diff --git a/drivers/scsi/huawei/hifc/unf_npiv.h b/drivers/scsi/huawei/hifc/unf_npiv.h new file mode 100644 index 000000000000..de9572931b78 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_npiv.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __NPIV_H__ +#define __NPIV_H__ + +/* product VPORT configuration */ +struct vport_config_s { + unsigned long long node_name; + unsigned long long port_name; + unsigned int port_mode; /* INI, TGT or both */ +}; + +/* product Vport functions */ +#define PORTID_VPINDEX_MASK 0xff000000 +#define PORTID_VPINDEX_SHIT 24 +unsigned int unf_npiv_conf(unsigned int v_port_id, unsigned long long v_wwpn); +struct unf_lport_s *unf_create_vport(struct unf_lport_s *v_lport, + struct vport_config_s *v_vport_config); +unsigned int unf_delete_vport(unsigned int v_port_id, unsigned int v_vp_index); + +/* Vport pool create and release functions */ +unsigned int unf_init_vport_pool(struct unf_lport_s *v_lport); +void unf_free_vport_pool(struct unf_lport_s *v_lport); + +/* Lport register stLPortMgTemp functions */ +void unf_vport_remove(void *v_vport); +void unf_vport_ref_dec(struct unf_lport_s *v_vport); + +/* link down all Vports after receiving a linkdown event */ +void unf_linkdown_all_vports(void *v_lport); +/* Lport received FLOGI ACC: link up all Vports */ +void unf_linkup_all_vports(struct unf_lport_s *v_lport); +/* Lport removal: destroy all Vports */ +void unf_destroy_all_vports(struct unf_lport_s *v_lport); +void unf_vport_fabric_logo(struct unf_lport_s *v_vport); +unsigned int unf_destroy_one_vport(struct unf_lport_s *v_vport); +struct unf_lport_s *unf_alloc_vport(struct unf_lport_s *v_lport, + unsigned long long v_wwpn); +unsigned int unf_drop_vport(struct unf_lport_s *v_vport); +void unf_link_down_one_vport(struct unf_lport_s *v_vport); +void *unf_lookup_vport_by_vp_index(void *v_lport, unsigned short v_vp_index); +void *unf_lookup_vport_by_port_id(void *v_lport, unsigned int v_port_id); +void *unf_lookup_vport_by_did(void *v_lport, unsigned int v_did); +void 
*unf_lookup_vport_by_wwpn(void *v_lport, unsigned long long v_wwpn); + +#endif + diff --git a/drivers/scsi/huawei/hifc/unf_rport.c b/drivers/scsi/huawei/hifc/unf_rport.c new file mode 100644 index 000000000000..3b216763dd81 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_rport.c @@ -0,0 +1,2430 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#include "unf_log.h" +#include "unf_common.h" +#include "unf_lport.h" +#include "unf_rport.h" +#include "unf_exchg.h" +#include "unf_service.h" +#include <scsi/scsi_transport_fc.h> +#include "unf_portman.h" + +/* rport state: */ +/* ready --->>> link_down --->>> cloing --->>> timeout --->>> delete */ + +struct unf_rport_feature_pool_s *port_fea_pool; + +/* + * Function Name : unf_sesion_loss_timeout + * Function Description: session loss timeout + * Input Parameters : struct work_struct *v_work + * Output Parameters : N/A + * Return Type : unsigned int + */ +void unf_sesion_loss_timeout(struct work_struct *v_work) +{ + struct unf_wwpn_rport_info_s *wwpn_rport_info = NULL; + + UNF_CHECK_VALID(0x3040, UNF_TRUE, v_work, return); + + wwpn_rport_info = container_of(v_work, struct unf_wwpn_rport_info_s, + loss_tmo_work.work); + if (unlikely(!wwpn_rport_info)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]wwpn_rport_info is NULL"); + return; + } + + atomic_set(&wwpn_rport_info->en_scsi_state, UNF_SCSI_ST_DEAD); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[info]Port(0x%x) wwpn(0x%llx) set target(0x%x) scsi state to dead", + ((struct unf_lport_s *)(wwpn_rport_info->lport))->port_id, + wwpn_rport_info->wwpn, + wwpn_rport_info->target_id); +} + +/* + * Function Name : unf_alloc_scsi_id + * Function Description: alloc r_port scsi id + * Input Parameters : struct unf_lport_s *v_lport + * : struct unf_rport_s *v_rport + * Output Parameters : N/A + * Return Type : unsigned int + */ +static unsigned int unf_alloc_scsi_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + struct unf_rport_scsi_id_image_s *rport_scsi_table = NULL; + struct unf_wwpn_rport_info_s *wwn_rport_info = NULL; + unsigned long flags = 0; + unsigned int index = 0; + unsigned int ret = UNF_RETURN_ERROR; + + rport_scsi_table = &v_lport->rport_scsi_table; + UNF_REFERNCE_VAR(ret); + + spin_lock_irqsave(&rport_scsi_table->scsi_image_table_lock, flags); + + /* 1. 
At first, existence check */ + for (index = 0; index < rport_scsi_table->max_scsi_id; index++) { + wwn_rport_info = + &rport_scsi_table->wwn_rport_info_table[index]; + if (v_rport->port_name == wwn_rport_info->wwpn) { + spin_unlock_irqrestore( + &rport_scsi_table->scsi_image_table_lock, + flags); + UNF_DELAYED_WORK_SYNC(ret, (v_lport->port_id), + (&wwn_rport_info->loss_tmo_work), + "loss tmo Timer work"); + + /* Plug case: reuse again */ + spin_lock_irqsave( + &rport_scsi_table->scsi_image_table_lock, + flags); + wwn_rport_info->rport = v_rport; + wwn_rport_info->last_en_scis_state = + atomic_read(&wwn_rport_info->en_scsi_state); + atomic_set(&wwn_rport_info->en_scsi_state, + UNF_SCSI_ST_ONLINE); + spin_unlock_irqrestore( + &rport_scsi_table->scsi_image_table_lock, + flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]port(0x%x) find the same scsi_id(0x%x) by wwpn(0x%llx) rport(%p) n_port_id(0x%x)", + v_lport->port_id, index, + wwn_rport_info->wwpn, + v_rport, v_rport->nport_id); + + atomic_inc(&v_lport->resume_scsi_id); + goto find; + } + } + + /* 2. Alloc new SCSI ID */ + for (index = 0; index < rport_scsi_table->max_scsi_id; index++) { + wwn_rport_info = + &rport_scsi_table->wwn_rport_info_table[index]; + if (wwn_rport_info->wwpn == INVALID_WWPN) { + spin_unlock_irqrestore( + &rport_scsi_table->scsi_image_table_lock, + flags); + UNF_DELAYED_WORK_SYNC(ret, (v_lport->port_id), + (&wwn_rport_info->loss_tmo_work), + "loss tmo Timer work"); + + /* Use the free space */ + spin_lock_irqsave( + &rport_scsi_table->scsi_image_table_lock, + flags); + wwn_rport_info->rport = v_rport; + wwn_rport_info->wwpn = v_rport->port_name; + wwn_rport_info->last_en_scis_state = + atomic_read(&wwn_rport_info->en_scsi_state); + atomic_set(&wwn_rport_info->en_scsi_state, + UNF_SCSI_ST_ONLINE); + spin_unlock_irqrestore( + &rport_scsi_table->scsi_image_table_lock, + flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]port(0x%x) allco new scsi_id(0x%x) by wwpn(0x%llx) rport(%p) n_port_id(0x%x)", + v_lport->port_id, index, + wwn_rport_info->wwpn, + v_rport, v_rport->nport_id); + + atomic_inc(&v_lport->alloc_scsi_id); + goto find; + } + } + + /* 3. 
Reuse space has been used */ + for (index = 0; index < rport_scsi_table->max_scsi_id; index++) { + wwn_rport_info = + &rport_scsi_table->wwn_rport_info_table[index]; + if (atomic_read(&wwn_rport_info->en_scsi_state) == + UNF_SCSI_ST_DEAD) { + spin_unlock_irqrestore( + &rport_scsi_table->scsi_image_table_lock, + flags); + UNF_DELAYED_WORK_SYNC(ret, (v_lport->port_id), + (&wwn_rport_info->loss_tmo_work), + "loss tmo Timer work"); + + spin_lock_irqsave( + &rport_scsi_table->scsi_image_table_lock, + flags); + if (wwn_rport_info->dfx_counter) { + memset(wwn_rport_info->dfx_counter, 0, + sizeof(struct unf_wwpn_dfx_counter_info_s)); + } + wwn_rport_info->rport = v_rport; + wwn_rport_info->wwpn = v_rport->port_name; + wwn_rport_info->last_en_scis_state = + atomic_read(&wwn_rport_info->en_scsi_state); + atomic_set(&wwn_rport_info->en_scsi_state, + UNF_SCSI_ST_ONLINE); + spin_unlock_irqrestore( + &rport_scsi_table->scsi_image_table_lock, + flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[info]port(0x%x) reuse a dead scsi_id(0x%x) by wwpn(0x%llx) rport(%p) n_port_id(0x%x)", + v_lport->port_id, index, + wwn_rport_info->wwpn, + v_rport, v_rport->nport_id); + + atomic_inc(&v_lport->reuse_scsi_id); + goto find; + } + } + + spin_unlock_irqrestore(&rport_scsi_table->scsi_image_table_lock, + flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]port(0x%x) there is not enough scsi_id with max_value(0x%x)", + v_lport->port_id, index); + + return INVALID_VALUE32; + +find: + if (!wwn_rport_info->dfx_counter) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "[info]Port(0x%x) allocate Rport(0x%x) DFX buffer", + v_lport->port_id, wwn_rport_info->rport->nport_id); + wwn_rport_info->dfx_counter = + vmalloc(sizeof(struct unf_wwpn_dfx_counter_info_s)); + if (!wwn_rport_info->dfx_counter) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, + UNF_ERR, + "[err]Port(0x%x) allocate DFX buffer fail", + v_lport->port_id); + + return INVALID_VALUE32; + } + + memset(wwn_rport_info->dfx_counter, 0, + sizeof(struct unf_wwpn_dfx_counter_info_s)); + } + + UNF_REFERNCE_VAR(ret); + return index; +} + +static unsigned int unf_get_scsi_id_by_wwpn(struct unf_lport_s *v_lport, + unsigned long long v_wwpn) +{ + struct unf_rport_scsi_id_image_s *rport_scsi_table = NULL; + struct unf_wwpn_rport_info_s *wwn_rport_info = NULL; + unsigned long flags = 0; + unsigned int index = 0; + + UNF_CHECK_VALID(0x3015, UNF_TRUE, + v_lport, return INVALID_VALUE32); + rport_scsi_table = &v_lport->rport_scsi_table; + + if (!v_wwpn) + return INVALID_VALUE32; + + spin_lock_irqsave(&rport_scsi_table->scsi_image_table_lock, flags); + + for (index = 0; index < rport_scsi_table->max_scsi_id; index++) { + wwn_rport_info = + &rport_scsi_table->wwn_rport_info_table[index]; + if (v_wwpn == wwn_rport_info->wwpn) { + spin_unlock_irqrestore( + &rport_scsi_table->scsi_image_table_lock, + flags); + return index; + } + } + + spin_unlock_irqrestore(&rport_scsi_table->scsi_image_table_lock, + flags); + + return INVALID_VALUE32; +} + +static void unf_set_device_state(struct unf_lport_s *v_lport, + unsigned int v_scsi_id, + int en_scsi_state) +{ + struct unf_rport_scsi_id_image_s *scsi_image_table = NULL; + struct unf_wwpn_rport_info_s *wwpn_rport_info = NULL; + + if (unlikely(v_scsi_id >= UNF_MAX_SCSI_ID)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) RPort scsi_id(0x%x) is max than 0x%x", + v_lport->port_id, v_scsi_id, UNF_MAX_SCSI_ID); + return; + } + + 
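 + /* scsi_id is within range: atomically update the SCSI state of the matching entry in the wwpn/rport image table. */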
scsi_image_table = &v_lport->rport_scsi_table; + wwpn_rport_info = &scsi_image_table->wwn_rport_info_table[v_scsi_id]; + atomic_set(&wwpn_rport_info->en_scsi_state, en_scsi_state); +} + +static void unf_set_rport_state(struct unf_rport_s *v_rport, + enum unf_rport_login_state_e v_states) +{ + UNF_CHECK_VALID(0x3055, UNF_TRUE, v_rport, return); + + if (v_states != v_rport->rp_state) { + /* Reset R_Port retry count */ + v_rport->retries = 0; + } + + v_rport->rp_state = v_states; +} + +void unf_rport_linkdown(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* + * 1. port_logout + * 2. rcvd_rscn_port_not_in_disc + * 3. each_rport_after_rscn + * 4. rcvd_gpnid_rjt + * 5. rport_after_logout(rport is fabric port) + */ + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3000, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3001, UNF_TRUE, v_rport, return); + UNF_REFERNCE_VAR(v_lport); + + /* 1. Update R_Port state: Link Down Event --->>> closing state */ + spin_lock_irqsave(&v_rport->rport_state_lock, flag); + unf_rport_state_ma(v_rport, UNF_EVENT_RPORT_LINK_DOWN); + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + + /* 3. Port enter closing (then enter to Delete) process */ + unf_rport_enter_closing(v_rport); +} + +static struct unf_rport_s *unf_rport_is_changed(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int v_sid) +{ + if (v_rport) { + /* S_ID or D_ID has been changed */ + if ((v_rport->nport_id != v_sid) || + (v_rport->local_nport_id != v_lport->nport_id)) { + /* + * 1. Swap case: (SID or DID changed): + * Report link down & delete immediately + */ + unf_rport_immediate_linkdown(v_lport, v_rport); + return NULL; + } + } + + return v_rport; +} + +struct unf_rport_s *unf_rport_set_qualifier_key_reuse( + struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport_by_nport_id, + struct unf_rport_s *v_rport_by_wwpn, + unsigned long long v_wwpn, + unsigned int v_sid) +{ + /* Used for HIFC Chip */ + struct unf_rport_s *rport = NULL; + struct unf_rport_s *rporta = NULL; + struct unf_rport_s *rportb = NULL; + int bwwpn_flag = 0; + + UNF_CHECK_VALID(0x3002, UNF_TRUE, v_lport, return NULL); + + /* About R_Port by N_Port_ID */ + rporta = unf_rport_is_changed(v_lport, v_rport_by_nport_id, v_sid); + /* About R_Port by WWpn */ + rportb = unf_rport_is_changed(v_lport, v_rport_by_wwpn, v_sid); + + if (!rporta && !rportb) { + return NULL; + } else if (!rporta && rportb) { + /* 3. Plug case: reuse again */ + rport = rportb; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) RPort(0x%p) WWPN(0x%llx) S_ID(0x%x) D_ID(0x%x) reused by wwpn", + v_lport->port_id, rport, rport->port_name, + rport->nport_id, rport->local_nport_id); + + return rport; /* Get by WWPN */ + } else if (rporta && !rportb) { + bwwpn_flag = ((rporta->port_name != v_wwpn) && + (rporta->port_name != 0) && + (rporta->port_name != INVALID_VALUE64)); + if (bwwpn_flag) { + /* 4. WWPN changed: Report link down + * & delete immediately + */ + unf_rport_immediate_linkdown(v_lport, rporta); + return NULL; + } + + /* Updtae WWPN */ + rporta->port_name = v_wwpn; + rport = rporta; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) RPort(0x%p) WWPN(0x%llx) S_ID(0x%x) D_ID(0x%x) reused by N_Port_ID", + v_lport->port_id, + rport, rport->port_name, + rport->nport_id, rport->local_nport_id); + + return rport; /* Get by N_Port_ID */ + } + + /* 5. 
Case for A == B && A && B */ + if (rporta == rportb) { + rport = rporta; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) find the same RPort(0x%p) WWPN(0x%llx) S_ID(0x%x) D_ID(0x%x)", + v_lport->port_id, + rport, rport->port_name, + rport->nport_id, rport->local_nport_id); + + return rport; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]port(0x%x) find two duplicate login. rport(A:0x%p, WWPN:0x%llx, S_ID:0x%x, D_ID:0x%x) rport(B:0x%p, WWPN:0x%llx, S_ID:0x%x, D_ID:0x%x)", + v_lport->port_id, + rporta, rporta->port_name, + rporta->nport_id, rporta->local_nport_id, + rportb, rportb->port_name, + rportb->nport_id, rportb->local_nport_id); + + /* 6. Case for A != B && A && B */ + unf_rport_immediate_linkdown(v_lport, rporta); + unf_rport_immediate_linkdown(v_lport, rportb); + + return NULL; +} + +struct unf_rport_s *unf_get_rport_by_wwn(struct unf_lport_s *v_lport, + unsigned long long v_wwpn) +{ + struct unf_lport_s *lport = NULL; + struct unf_disc_s *disc = NULL; + struct unf_rport_s *rport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flag = 0; + struct unf_rport_s *find_rport = NULL; + + UNF_CHECK_VALID(0x3049, UNF_TRUE, v_lport, return NULL); + lport = (struct unf_lport_s *)v_lport; + disc = &lport->disc; + + /* for each r_port from busy_list: compare wwpn(port name) */ + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + list_for_each_safe(node, next_node, &disc->list_busy_rports) { + rport = list_entry(node, struct unf_rport_s, entry_rport); + if (rport && rport->port_name == v_wwpn) { + find_rport = rport; + + break; + } + } + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + return find_rport; +} + +struct unf_rport_s *unf_find_valid_rport(struct unf_lport_s *v_lport, + unsigned long long v_wwpn, + unsigned int v_sid) +{ + struct unf_rport_s *rport = NULL; + struct unf_rport_s *rport_by_nport_id = NULL; + struct unf_rport_s *rport_by_wwpn = NULL; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x3005, UNF_TRUE, v_lport, return NULL); + UNF_CHECK_VALID(0x3006, UNF_TRUE, + v_lport->pfn_unf_qualify_rport, return NULL); + + /* Get R_Port by WWN & N_Port_ID */ + rport_by_nport_id = unf_get_rport_by_nport_id(v_lport, v_sid); + rport_by_wwpn = unf_get_rport_by_wwn(v_lport, v_wwpn); + + /* R_Port check: by WWPN */ + if (rport_by_wwpn) { + spin_lock_irqsave(&rport_by_wwpn->rport_state_lock, flags); + if (rport_by_wwpn->nport_id == UNF_FC_FID_FLOGI) { + spin_unlock_irqrestore( + &rport_by_wwpn->rport_state_lock, + flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_LOGIN_ATT, UNF_INFO, + "[err]Port(0x%x) RPort(0x%p) find by WWPN(0x%llx) is invalid", + v_lport->port_id, rport_by_wwpn, v_wwpn); + + rport_by_wwpn = NULL; + } else { + spin_unlock_irqrestore( + &rport_by_wwpn->rport_state_lock, + flags); + } + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x_0x%x) RPort(0x%p) find by N_Port_ID(0x%x) and RPort(0x%p) by WWPN(0x%llx)", + v_lport->port_id, v_lport->nport_id, + rport_by_nport_id, v_sid, rport_by_wwpn, v_wwpn); + + /* R_Port validity check: get by WWPN & N_Port_ID */ + rport = v_lport->pfn_unf_qualify_rport(v_lport, rport_by_nport_id, + rport_by_wwpn, + v_wwpn, v_sid); + return rport; +} + +void unf_rport_delay_login(struct unf_rport_s *v_rport) +{ + UNF_CHECK_VALID(0x3009, UNF_TRUE, v_rport, return); + + /* Do R_Port recovery: PLOGI or PRLI or LOGO */ + unf_rport_error_recovery(v_rport); +} + +unsigned int 
unf_rport_ref_inc(struct unf_rport_s *v_rport) +{ + UNF_CHECK_VALID(0x3010, UNF_TRUE, + v_rport, return UNF_RETURN_ERROR); + + if (atomic_read(&v_rport->rport_ref_cnt) <= 0) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Rport(0x%x) reference count is wrong %d", + v_rport->nport_id, + atomic_read(&v_rport->rport_ref_cnt)); + return UNF_RETURN_ERROR; + } + + atomic_inc(&v_rport->rport_ref_cnt); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Rport(0x%x) reference count is %d", + v_rport->nport_id, atomic_read(&v_rport->rport_ref_cnt)); + + return RETURN_OK; +} + +void unf_rport_enter_logo(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* + * 1. TMF/ABTS timeout recovery :Y + * 2. L_Port error recovery --->>> larger than retry_count :Y + * 3. R_Port error recovery --->>> larger than retry_count :Y + * 4. Check PLOGI parameters --->>> parameter is error :Y + * 5. PRLI handler --->>> R_Port state is error :Y + * 6. PDISC handler --->>> R_Port state is not PRLI_WAIT :Y + * 7. ADISC handler --->>> R_Port state is not PRLI_WAIT :Y + * 8. PLOGI wait timeout with R_PORT is INI mode :Y + * 9. RCVD GFFID_RJT --->>> R_Port state is INIT :Y + * 10. RCVD GPNID_ACC --->>> R_Port state is error :Y + * 11. Private Loop mode with LOGO case :Y + * 12. P2P mode with LOGO case :Y + * 13. Fabric mode with LOGO case :Y + * 14. RCVD PRLI_ACC with R_Port is INI :Y + * 15. TGT RCVD BLS_REQ with session is error :Y + */ + unsigned long flags = 0; + + UNF_CHECK_VALID(0x3013, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3014, UNF_TRUE, v_rport, return); + + spin_lock_irqsave(&v_rport->rport_state_lock, flags); + + if ((v_rport->rp_state == UNF_RPORT_ST_CLOSING) || + (v_rport->rp_state == UNF_RPORT_ST_DELETE)) { + /* 1. Already within Closing or Delete: Do nothing */ + spin_unlock_irqrestore(&v_rport->rport_state_lock, flags); + + return; + } else if (v_rport->rp_state == UNF_RPORT_ST_LOGO) { + /* 2. Update R_Port state: + * Normal Enter Event --->>> closing state + */ + unf_rport_state_ma(v_rport, UNF_EVENT_RPORT_NORMAL_ENTER); + spin_unlock_irqrestore(&v_rport->rport_state_lock, flags); + + /* Send Logo if necessary */ + if (unf_send_logo(v_lport, v_rport) != RETURN_OK) + unf_rport_enter_closing(v_rport); + } else { + /* + * 3. 
Update R_Port state: Link Down Event --->>> closing state + * enter closing state + */ + unf_rport_state_ma(v_rport, UNF_EVENT_RPORT_LINK_DOWN); + spin_unlock_irqrestore(&v_rport->rport_state_lock, flags); + + unf_rport_enter_closing(v_rport); + } +} + +unsigned int unf_free_scsi_id(struct unf_lport_s *v_lport, + unsigned int v_scsi_id) +{ + unsigned long flags = 0; + struct unf_rport_scsi_id_image_s *rport_scsi_table = NULL; + struct unf_wwpn_rport_info_s *wwn_rport_info = NULL; + + UNF_CHECK_VALID(0x3016, UNF_TRUE, + v_lport, return UNF_RETURN_ERROR); + + if (unlikely(v_lport->b_port_removing == UNF_TRUE)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) is removing and do nothing", + v_lport->port_id, v_lport->nport_id); + + return UNF_RETURN_ERROR; + } + + if (unlikely(v_scsi_id >= UNF_MAX_SCSI_ID)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x_0x%x) scsi_id(0x%x) is bigger than %d", + v_lport->port_id, v_lport->nport_id, + v_scsi_id, UNF_MAX_SCSI_ID); + + return UNF_RETURN_ERROR; + } + + rport_scsi_table = &v_lport->rport_scsi_table; + if (rport_scsi_table->wwn_rport_info_table) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[warn]Port(0x%x_0x%x) RPort(0x%p) free scsi_id(0x%x) wwpn(0x%llx) target_id(0x%x) succeed", + v_lport->port_id, v_lport->nport_id, + rport_scsi_table->wwn_rport_info_table[v_scsi_id].rport, + v_scsi_id, + rport_scsi_table->wwn_rport_info_table[v_scsi_id].wwpn, + rport_scsi_table->wwn_rport_info_table[v_scsi_id].target_id); + + spin_lock_irqsave(&rport_scsi_table->scsi_image_table_lock, + flags); + wwn_rport_info = + &rport_scsi_table->wwn_rport_info_table[v_scsi_id]; + if (wwn_rport_info->rport) { + wwn_rport_info->rport->rport = NULL; + wwn_rport_info->rport = NULL; + } + + wwn_rport_info->target_id = INVALID_VALUE32; + atomic_set(&wwn_rport_info->en_scsi_state, UNF_SCSI_ST_DEAD); + + /* NOTE: remain WWPN/Port_Name unchanged(un-cleared) */ + spin_unlock_irqrestore( + &rport_scsi_table->scsi_image_table_lock, + flags); + + return RETURN_OK; + } + + return UNF_RETURN_ERROR; +} + +static void unf_report_ini_linkup_event(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + UNF_CHECK_VALID(0x3031, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3032, UNF_TRUE, v_rport, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR, + "[event]Port(0x%x) RPort(0x%x_0x%p) put INI link up work(%p) to work_queue", + v_lport->port_id, v_rport->nport_id, v_rport, + &v_rport->start_work); + + if (unlikely(!queue_work(v_lport->link_event_wq, + &v_rport->start_work))) { + atomic_inc(&v_lport->add_start_work_failed); + + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[err]Port(0x%x) RPort(0x%x_0x%p) put INI link up to work_queue failed", + v_lport->port_id, v_rport->nport_id, v_rport); + } +} + +static void unf_report_ini_linkdown_event(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + unsigned int scsi_id = 0; + struct fc_rport *rport = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3033, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3034, UNF_TRUE, v_rport, return); + + /* + * 1. 
set local device(rport/rport_info_table) state + * -------------------------------------------------OFF_LINE + ** + * about rport->scsi_id + * valid during rport link up to link down + */ + spin_lock_irqsave(&v_rport->rport_state_lock, flag); + scsi_id = v_rport->scsi_id; + unf_set_device_state(v_lport, scsi_id, UNF_SCSI_ST_OFFLINE); + + /* 2. delete scsi's rport */ + rport = (struct fc_rport *)v_rport->rport; + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + if (rport) { + fc_remote_port_delete(rport); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[event]port(0x%x_0x%x) delete rport(0x%x) wwpn(0x%llx) scsi_id(0x%x) succeed", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, + v_rport->port_name, scsi_id); + + atomic_inc(&v_lport->scsi_session_del_success); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[info]Port(0x%x_0x%x) delete RPort(0x%x_0x%p) failed", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, v_rport); + + atomic_inc(&v_lport->scsi_session_del_failed); + } +} + +void unf_update_lport_state_by_linkup_event(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int rport_att) +{ + /* Report R_Port Link Up/Down Event */ + unsigned long flag = 0; + enum unf_port_state_e en_lport_state = 0; + + UNF_CHECK_VALID(0x3019, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3020, UNF_TRUE, v_rport, return); + + spin_lock_irqsave(&v_rport->rport_state_lock, flag); + + /* 1. R_Port does not has TGT mode any more */ + if (!(rport_att & UNF_FC4_FRAME_PARM_3_TGT) && + (v_rport->lport_ini_state == UNF_PORT_STATE_LINKUP)) { + v_rport->last_lport_ini_state = v_rport->lport_ini_state; + // L_Port INI mode: Down + v_rport->lport_ini_state = UNF_PORT_STATE_LINKDOWN; + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) RPort(0x%x) does not have TGT attribute(0x%x) any more", + v_lport->port_id, v_rport->nport_id, rport_att); + } + + /* 2. R_Port with TGT mode, L_Port with INI mode */ + if ((rport_att & UNF_FC4_FRAME_PARM_3_TGT) && + (v_lport->options & UNF_FC4_FRAME_PARM_3_INI)) { + v_rport->last_lport_ini_state = v_rport->lport_ini_state; + // L_Port INI mode: Up + v_rport->lport_ini_state = UNF_PORT_STATE_LINKUP; + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[warn]Port(0x%x) update INI state with last(0x%x) and now(0x%x)", + v_lport->port_id, v_rport->last_lport_ini_state, + v_rport->lport_ini_state); + } + + /* 3. 
Report L_Port INI/TGT Down/Up event to SCSI */ + if (v_rport->last_lport_ini_state == v_rport->lport_ini_state) { + if (v_rport->nport_id < UNF_FC_FID_DOM_MGR) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) RPort(0x%x %p) INI state(0x%x) has not been changed", + v_lport->port_id, v_rport->nport_id, v_rport, + v_rport->lport_ini_state); + } + + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + + return; + } + + en_lport_state = v_rport->lport_ini_state; + + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + + switch (en_lport_state) { + /* Link Down */ + case UNF_PORT_STATE_LINKDOWN: + unf_report_ini_linkdown_event(v_lport, v_rport); + break; + + /* Link Up */ + case UNF_PORT_STATE_LINKUP: + unf_report_ini_linkup_event(v_lport, v_rport); + break; + + default: + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) with unknown link status(0x%x)", + v_lport->port_id, v_rport->lport_ini_state); + break; + } +} + +static void unf_rport_call_back(void *v_rport, + void *v_lport, + unsigned int v_result) +{ + /* Report R_Port link down event */ + struct unf_rport_s *rport = NULL; + struct unf_lport_s *lport = NULL; + unsigned long flag = 0; + + UNF_REFERNCE_VAR(lport); + UNF_REFERNCE_VAR(v_result); + + UNF_CHECK_VALID(0x3037, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3038, UNF_TRUE, v_lport, return); + rport = (struct unf_rport_s *)v_rport; + lport = (struct unf_lport_s *)v_lport; + + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->last_lport_ini_state = rport->lport_ini_state; + rport->lport_ini_state = UNF_PORT_STATE_LINKDOWN; + rport->last_lport_tgt_state = rport->lport_tgt_state; + rport->lport_tgt_state = UNF_PORT_STATE_LINKDOWN; + + /* Report R_Port Link Down Event to scsi */ + if (rport->last_lport_ini_state == rport->lport_ini_state) { + if (rport->nport_id < UNF_FC_FID_DOM_MGR) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) RPort(0x%x %p) INI state(0x%x) has not been changed", + lport->port_id, rport->nport_id, rport, + rport->lport_ini_state); + } + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + return; + } + + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + unf_report_ini_linkdown_event(lport, rport); +} + +static void unf_rport_recovery_timeout(struct work_struct *v_work) +{ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + unsigned int ret = RETURN_OK; + unsigned long flag = 0; + enum unf_rport_login_state_e en_rp_state = UNF_RPORT_ST_INIT; + + UNF_CHECK_VALID(0x3039, UNF_TRUE, v_work, return); + + rport = container_of(v_work, struct unf_rport_s, recovery_work.work); + if (unlikely(!rport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, + UNF_ERR, "[err]RPort is NULL"); + + return; + } + + lport = rport->lport; + if (unlikely(!lport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]RPort(0x%x) Port is NULL", + rport->nport_id); + + /* for timer */ + unf_rport_ref_dec(rport); + return; + } + + spin_lock_irqsave(&rport->rport_state_lock, flag); + en_rp_state = rport->rp_state; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x_0x%x) RPort(0x%x) state(0x%x) recovery timer timeout", + lport->port_id, lport->nport_id, + rport->nport_id, en_rp_state); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + switch (en_rp_state) { + case UNF_RPORT_ST_PLOGI_WAIT: + if (((lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT) && + 
(lport->port_name > rport->port_name)) || + lport->en_act_topo != UNF_ACT_TOP_P2P_DIRECT) { + /* P2P: Name is master with P2P_D or has INI Mode */ + ret = unf_send_plogi(rport->lport, rport); + } + break; + + case UNF_RPORT_ST_PRLI_WAIT: + ret = unf_send_prli(rport->lport, rport); + break; + + default: + break; + } + + if (ret != RETURN_OK) + unf_rport_error_recovery(rport); + + /* company with timer */ + unf_rport_ref_dec(rport); +} + +static unsigned int unf_get_dev_loss_tmo_by_rport(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + struct fc_rport *rport = (struct fc_rport *)v_rport->rport; + + if (rport) + return rport->dev_loss_tmo; + else + return (unsigned int)unf_get_link_lose_tmo(v_lport); +} + +void unf_schedule_closing_work(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + unsigned long flags = 0; + struct unf_rport_scsi_id_image_s *rport_scsi_table = NULL; + struct unf_wwpn_rport_info_s *wwn_rport_info = NULL; + unsigned int scsi_id = 0; + unsigned int ret = 0; + unsigned int delay = 0; + + UNF_CHECK_VALID(0x3561, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3562, UNF_TRUE, v_rport, return); + + delay = unf_get_dev_loss_tmo_by_rport(v_lport, v_rport); + rport_scsi_table = &v_lport->rport_scsi_table; + scsi_id = v_rport->scsi_id; + spin_lock_irqsave(&v_rport->rport_state_lock, flags); + + /* 1. Cancel recovery_work */ + if (cancel_delayed_work(&v_rport->recovery_work)) { + atomic_dec(&v_rport->rport_ref_cnt); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x_0x%x) RPort(0x%x_0x%p) cancel recovery work succeed", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, v_rport); + } + + /* 2. Cancel Open_work */ + if (cancel_delayed_work(&v_rport->open_work)) { + atomic_dec(&v_rport->rport_ref_cnt); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x_0x%x) RPort(0x%x_0x%p) cancel open work succeed", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, v_rport); + } + + spin_unlock_irqrestore(&v_rport->rport_state_lock, flags); + + /* 3. 
Work in-queue (switch to thread context) */ + if (!queue_work(v_lport->link_event_wq, &v_rport->closing_work)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_ERR, + "[warn]Port(0x%x) RPort(0x%x_0x%p) add link down to work queue failed", + v_lport->port_id, v_rport->nport_id, v_rport); + + atomic_inc(&v_lport->add_closing_work_failed); + + } else { + spin_lock_irqsave(&v_rport->rport_state_lock, flags); + (void)unf_rport_ref_inc(v_rport); + spin_unlock_irqrestore(&v_rport->rport_state_lock, flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR, + "[info]Port(0x%x) RPort(0x%x_0x%p) add link down to work(%p) queue succeed", + v_lport->port_id, v_rport->nport_id, v_rport, + &v_rport->closing_work); + } + + if (v_rport->nport_id > UNF_FC_FID_DOM_MGR) + return; + + if (scsi_id >= UNF_MAX_SCSI_ID) { + scsi_id = unf_get_scsi_id_by_wwpn(v_lport, v_rport->port_name); + if (scsi_id >= UNF_MAX_SCSI_ID) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_NORMAL, UNF_WARN, + "[warn]Port(0x%x) RPort(0x%p) NPortId(0x%x) wwpn(0x%llx) option(0x%x) scsi_id(0x%x) is max than(0x%x)", + v_lport->port_id, v_rport, v_rport->nport_id, + v_rport->port_name, + v_rport->options, scsi_id, + UNF_MAX_SCSI_ID); + + return; + } + } + + wwn_rport_info = &rport_scsi_table->wwn_rport_info_table[scsi_id]; + ret = queue_delayed_work( + unf_work_queue, + &wwn_rport_info->loss_tmo_work, + (unsigned long)delay * msecs_to_jiffies(1000)); + if (!ret) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR, + "[info] Port(0x%x) add RPort(0x%p) NPortId(0x%x) scsi_id(0x%x) wwpn(0x%llx) loss timeout work failed", + v_lport->port_id, v_rport, + v_rport->nport_id, scsi_id, + v_rport->port_name); + } +} + +static void unf_rport_closing_timeout(struct work_struct *v_work) +{ + /* closing --->>>(timeout)--->>> delete */ + struct unf_rport_s *rport = NULL; + struct unf_lport_s *lport = NULL; + struct unf_disc_s *disc = NULL; + unsigned long rport_flag = 0; + unsigned long disc_flag = 0; + void (*pfn_unf_rport_call_back)(void *, void *, unsigned int) = NULL; + + UNF_CHECK_VALID(0x3040, UNF_TRUE, v_work, return); + + /* Get R_Port & L_Port & Disc */ + rport = container_of(v_work, struct unf_rport_s, closing_work); + if (unlikely(!rport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, + UNF_ERR, "[err]RPort is NULL"); + return; + } + + lport = rport->lport; + if (unlikely(!lport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]RPort(0x%x_0x%p) Port is NULL", + rport->nport_id, rport); + + /* Release directly (for timer) */ + unf_rport_ref_dec(rport); + return; + } + disc = &lport->disc; + + spin_lock_irqsave(&rport->rport_state_lock, rport_flag); + + /* 1. Update R_Port state: event_timeout --->>> state_delete */ + unf_rport_state_ma(rport, UNF_EVENT_RPORT_CLS_TIMEOUT); + + /* Check R_Port state */ + if (rport->rp_state != UNF_RPORT_ST_DELETE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x_0x%x) RPort(0x%x) closing timeout with error state(0x%x)", + lport->port_id, lport->nport_id, + rport->nport_id, rport->rp_state); + + spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); + + /* Dec ref_cnt for timer */ + unf_rport_ref_dec(rport); + return; + } + + pfn_unf_rport_call_back = rport->pfn_unf_rport_call_back; + spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); + + /* 2. 
Put R_Port to delete list */ + spin_lock_irqsave(&disc->rport_busy_pool_lock, disc_flag); + list_del_init(&rport->entry_rport); + list_add_tail(&rport->entry_rport, &disc->list_delete_rports); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag); + + /* 3. Report rport link down event to scsi */ + if (pfn_unf_rport_call_back) { /* unf_rport_call_back */ + pfn_unf_rport_call_back((void *)rport, (void *)rport->lport, + RETURN_OK); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]RPort(0x%x) callback is NULL", + rport->nport_id); + } + + /* 4. Remove/delete R_Port */ + unf_rport_ref_dec(rport); + unf_rport_ref_dec(rport); +} + +static void unf_rport_linkup_to_scsi(struct work_struct *v_work) +{ + struct fc_rport_identifiers rport_ids; + struct fc_rport *rport = NULL; + unsigned long flags = RETURN_OK; + struct unf_wwpn_rport_info_s *wwn_rport_info = NULL; + struct unf_rport_scsi_id_image_s *rport_scsi_table = NULL; + unsigned int scsi_id = 0; + + struct unf_lport_s *lport = NULL; + struct unf_rport_s *unf_rport = NULL; + + UNF_CHECK_VALID(0x3040, UNF_TRUE, v_work, return); + + unf_rport = container_of(v_work, struct unf_rport_s, start_work); + if (unlikely(!unf_rport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]RPort is NULL for work(%p)", v_work); + return; + } + + lport = unf_rport->lport; + if (unlikely(!lport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]RPort(0x%x_0x%p) Port is NULL", + unf_rport->nport_id, unf_rport); + return; + } + + /* 1. Alloc R_Port SCSI_ID (image table) */ + unf_rport->scsi_id = unf_alloc_scsi_id(lport, unf_rport); + if (unlikely(unf_rport->scsi_id == INVALID_VALUE32)) { + atomic_inc(&lport->scsi_session_add_failed); + + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[err]Port(0x%x_0x%x) RPort(0x%x_0x%p) wwpn(0x%llx) scsi_id(0x%x) is invalid", + lport->port_id, lport->nport_id, + unf_rport->nport_id, unf_rport, + unf_rport->port_name, unf_rport->scsi_id); + + /* NOTE: return */ + return; + } + + /* 2. Add rport to scsi */ + scsi_id = unf_rport->scsi_id; + rport_ids.node_name = unf_rport->node_name; + rport_ids.port_name = unf_rport->port_name; + rport_ids.port_id = unf_rport->nport_id; + rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; + rport = fc_remote_port_add(lport->host_info.p_scsi_host, + 0, &rport_ids); + if (unlikely(!rport)) { + atomic_inc(&lport->scsi_session_add_failed); + + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x_0x%x) RPort(0x%x_0x%p) wwpn(0x%llx) report link up to scsi failed", + lport->port_id, lport->nport_id, + unf_rport->nport_id, unf_rport, + unf_rport->port_name); + + unf_free_scsi_id(lport, scsi_id); + return; + } + + /* 3. Change rport role save local SCSI_ID to scsi rport */ + *((unsigned int *)rport->dd_data) = scsi_id; + rport->supported_classes = FC_COS_CLASS3; + rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET; + fc_remote_port_rolechg(rport, rport_ids.roles); + + /* 4. 
Save scsi rport info to local R_Port */ + spin_lock_irqsave(&unf_rport->rport_state_lock, flags); + unf_rport->rport = rport; + spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags); + + rport_scsi_table = &lport->rport_scsi_table; + spin_lock_irqsave(&rport_scsi_table->scsi_image_table_lock, flags); + wwn_rport_info = &rport_scsi_table->wwn_rport_info_table[scsi_id]; + wwn_rport_info->target_id = rport->scsi_target_id; + wwn_rport_info->rport = unf_rport; + atomic_set(&wwn_rport_info->en_scsi_state, UNF_SCSI_ST_ONLINE); + spin_unlock_irqrestore(&rport_scsi_table->scsi_image_table_lock, + flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[event]port(0x%x_0x%x) rport(0x%x) wwpn(0x%llx) scsi_id(0x%x) link up to scsi succeed", + lport->port_id, lport->nport_id, + unf_rport->nport_id, unf_rport->port_name, + scsi_id); + + atomic_inc(&lport->scsi_session_add_success); +} + +static void unf_rport_open_timeout(struct work_struct *v_work) +{ + struct unf_rport_s *rport = NULL; + struct unf_lport_s *lport = NULL; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x3041, UNF_TRUE, v_work, return); + + rport = container_of(v_work, struct unf_rport_s, open_work.work); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]RPort is NULL"); + + return; + } + + spin_lock_irqsave(&rport->rport_state_lock, flags); + lport = rport->lport; + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) RPort(0x%x) open work timeout with state(0x%x)", + lport->port_id, lport->nport_id, + rport->nport_id, rport->rp_state); + + /* NOTE: R_Port state check */ + if (rport->rp_state != UNF_RPORT_ST_PRLI_WAIT) { + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + /* Dec ref_cnt for timer case */ + unf_rport_ref_dec(rport); + return; + } + + /* Report R_Port Link Down event */ + unf_rport_state_ma(rport, UNF_EVENT_RPORT_LINK_DOWN); + + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + unf_rport_enter_closing(rport); + + /* Dec ref_cnt for timer case */ + unf_rport_ref_dec(rport); + + UNF_REFERNCE_VAR(lport); +} + +static unsigned int unf_alloc_index_for_rport(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + unsigned long rport_flag = 0; + unsigned long pool_flag = 0; + unsigned int alloc_indx = 0; + unsigned int max_rport = 0; + struct unf_rport_pool_s *rport_pool = NULL; + + rport_pool = &v_lport->rport_pool; + max_rport = v_lport->low_level_func.lport_cfg_items.max_login; + + spin_lock_irqsave(&rport_pool->rport_free_pool_lock, pool_flag); + while (alloc_indx < max_rport) { + if (!test_bit((int)alloc_indx, rport_pool->pul_rpi_bitmap)) { + /* Case for HIFC */ + if (unlikely(atomic_read( + &v_lport->port_no_operater_flag) == + UNF_LPORT_NOP)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) is within NOP", + v_lport->port_id); + + spin_unlock_irqrestore( + &rport_pool->rport_free_pool_lock, + pool_flag); + return UNF_RETURN_ERROR; + } + + spin_lock_irqsave(&v_rport->rport_state_lock, + rport_flag); + /* set R_Port index */ + v_rport->rport_index = alloc_indx; + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) RPort(0x%x) alloc index(0x%x) succeed", + v_lport->port_id, alloc_indx, + v_rport->nport_id); + + spin_unlock_irqrestore(&v_rport->rport_state_lock, + rport_flag); + + /* Set (index) bit */ + set_bit((int)alloc_indx, rport_pool->pul_rpi_bitmap); + + /* Break here */ + break; + } + alloc_indx++; + } + 
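 + /* Bitmap scan finished: release the pool lock; if no free bit was found before max_rport the allocation fails, otherwise the index has been reserved and stored in the R_Port. */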
spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, pool_flag); + + if (alloc_indx == max_rport) + return UNF_RETURN_ERROR; + else + return RETURN_OK; +} + +static void unf_check_rport_pool_status(struct unf_lport_s *v_lport) +{ + struct unf_lport_s *lport = v_lport; + struct unf_rport_pool_s *rport_pool = NULL; + unsigned long flags = 0; + unsigned int max_rport = 0; + + UNF_CHECK_VALID(0x3045, UNF_TRUE, v_lport, return); + rport_pool = &lport->rport_pool; + + spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flags); + max_rport = lport->low_level_func.lport_cfg_items.max_login; + if ((rport_pool->rport_pool_completion) && + (max_rport == rport_pool->rport_pool_count)) { + complete(rport_pool->rport_pool_completion); + } + spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flags); +} + +void unf_init_rport_params(struct unf_rport_s *v_rport, + struct unf_lport_s *v_lport) +{ + struct unf_rport_s *rport = v_rport; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3046, UNF_TRUE, rport, return); + UNF_CHECK_VALID(0x3046, UNF_TRUE, v_lport, return); + + spin_lock_irqsave(&rport->rport_state_lock, flag); + unf_set_rport_state(rport, UNF_RPORT_ST_INIT); + /* set callback function */ + rport->pfn_unf_rport_call_back = unf_rport_call_back; + rport->lport = v_lport; + rport->fcp_conf_needed = UNF_FALSE; + rport->tape_support_needed = UNF_FALSE; + rport->mas_retries = UNF_MAX_RETRY_COUNT; + rport->logo_retries = 0; + rport->retries = 0; + rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; + rport->last_lport_ini_state = UNF_PORT_STATE_LINKDOWN; + rport->lport_ini_state = UNF_PORT_STATE_LINKDOWN; + rport->last_lport_tgt_state = UNF_PORT_STATE_LINKDOWN; + rport->lport_tgt_state = UNF_PORT_STATE_LINKDOWN; + rport->node_name = 0; + rport->port_name = INVALID_WWPN; + rport->disc_done = 0; + rport->scsi_id = INVALID_VALUE32; + rport->data_thread = NULL; + sema_init(&rport->task_sema, 0); + atomic_set(&rport->rport_ref_cnt, 0); + atomic_set(&rport->pending_io_cnt, 0); + rport->rport_alloc_jifs = jiffies; + + rport->ed_tov = UNF_DEFAULT_EDTOV + 500; + rport->ra_tov = UNF_DEFAULT_RATOV; + + INIT_WORK(&rport->closing_work, unf_rport_closing_timeout); + INIT_WORK(&rport->start_work, unf_rport_linkup_to_scsi); + INIT_DELAYED_WORK(&rport->recovery_work, unf_rport_recovery_timeout); + INIT_DELAYED_WORK(&rport->open_work, unf_rport_open_timeout); + + atomic_inc(&rport->rport_ref_cnt); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); +} + +static unsigned int unf_alloc_llrport_resource(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int v_nport_id) +{ + unsigned int ret = RETURN_OK; + struct unf_rport_info_s rport_info = { 0 }; + + struct unf_lport_s *lport = NULL; + + lport = v_lport->root_lport; + + if (lport->low_level_func.service_op.pfn_unf_alloc_rport_res) { + rport_info.nport_id = v_nport_id; + rport_info.rport_index = v_rport->rport_index; + rport_info.local_nport_id = v_lport->nport_id; /* sid */ + rport_info.port_name = 0; + + ret = lport->low_level_func.service_op.pfn_unf_alloc_rport_res( + lport->fc_port, + &rport_info); + } else { + ret = RETURN_OK; + } + + return ret; +} + +static void *unf_add_rport_to_busy_list(struct unf_lport_s *v_lport, + struct unf_rport_s *v_new_rport, + unsigned int v_nport_id) +{ + struct unf_rport_pool_s *rport_pool = NULL; + struct unf_lport_s *lport = NULL; + struct unf_disc_s *disc = NULL; + struct unf_rport_s *new_rport = v_new_rport; + struct unf_rport_s *old_rport = NULL; + struct list_head *node = NULL; + struct 
list_head *next_node = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3046, UNF_TRUE, v_lport, return NULL); + UNF_CHECK_VALID(0x3046, UNF_TRUE, v_new_rport, return NULL); + + lport = v_lport->root_lport; + disc = &v_lport->disc; + UNF_CHECK_VALID(0x3046, UNF_TRUE, lport, return NULL); + rport_pool = &lport->rport_pool; + + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + list_for_each_safe(node, next_node, &disc->list_busy_rports) { + /* According to N_Port_ID */ + old_rport = list_entry(node, struct unf_rport_s, entry_rport); + if (old_rport->nport_id == v_nport_id) + break; /* find by N_Port_ID */ + old_rport = NULL; + } + + if (old_rport) { + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + /* Use old R_Port & Add new R_Port back to R_Port Pool */ + spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag); + clear_bit((int)new_rport->rport_index, + rport_pool->pul_rpi_bitmap); + list_add_tail(&new_rport->entry_rport, + &rport_pool->list_rports_pool); + rport_pool->rport_pool_count++; + spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, + flag); + + unf_check_rport_pool_status(lport); + return (void *)old_rport; + } + + if (unf_alloc_llrport_resource(v_lport, new_rport, + v_nport_id != RETURN_OK)) { + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + /* Add new R_Port back to R_Port Pool */ + spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag); + clear_bit((int)new_rport->rport_index, + rport_pool->pul_rpi_bitmap); + list_add_tail(&new_rport->entry_rport, + &rport_pool->list_rports_pool); + rport_pool->rport_pool_count++; + spin_unlock_irqrestore( + &rport_pool->rport_free_pool_lock, flag); + + unf_check_rport_pool_status(lport); + + return NULL; + } + + /* Add new R_Port to busy list */ + list_add_tail(&new_rport->entry_rport, + &disc->list_busy_rports); + new_rport->nport_id = v_nport_id; /* set R_Port N_Port_ID */ + /* set L_Port N_Port_ID */ + new_rport->local_nport_id = v_lport->nport_id; + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + unf_init_rport_params(new_rport, v_lport); + + return (void *)new_rport; +} + +void *unf_rport_get_free_and_init(void *v_lport, + unsigned int v_rport_type, + unsigned int v_nport_id) +{ + struct unf_lport_s *lport = NULL; + struct unf_rport_pool_s *rport_pool = NULL; + struct unf_disc_s *disc = NULL; + struct unf_disc_s *v_port_disc = NULL; + struct unf_rport_s *rport = NULL; + struct list_head *list_head = NULL; + unsigned long flag = 0; + struct unf_disc_rport_s *disc_rport = NULL; + + UNF_REFERNCE_VAR(v_rport_type); + UNF_REFERNCE_VAR(rport); + + UNF_CHECK_VALID(0x3046, UNF_TRUE, v_lport, return NULL); + lport = ((struct unf_lport_s *)v_lport)->root_lport; /* ROOT L_Port */ + UNF_CHECK_VALID(0x3047, UNF_TRUE, lport, return NULL); + + /* Check L_Port state: NOP */ + if (unlikely(atomic_read(&lport->port_no_operater_flag) == + UNF_LPORT_NOP)) { + return NULL; + } + + rport_pool = &lport->rport_pool; + disc = &lport->disc; + + /* 1. 
UNF_PORT_TYPE_DISC: Get from disc_rport_pool */ + if (v_rport_type == UNF_PORT_TYPE_DISC) { + v_port_disc = &(((struct unf_lport_s *)v_lport)->disc); + + /* NOTE: list_disc_rports_pool used + * with list_disc_rport_busy + */ + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + if (!list_empty(&disc->disc_rport_mgr.list_disc_rports_pool)) { + /* Get & delete from Disc R_Port Pool & + * Add it to Busy list + */ + list_head = + (&disc->disc_rport_mgr.list_disc_rports_pool)->next; + list_del_init(list_head); + disc_rport = list_entry(list_head, + struct unf_disc_rport_s, + entry_rport); + /* Set R_Port N_Port_ID */ + disc_rport->nport_id = v_nport_id; + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, + flag); + + /* Add to list_disc_rport_busy */ + spin_lock_irqsave(&v_port_disc->rport_busy_pool_lock, + flag); + list_add_tail( + list_head, + &v_port_disc->disc_rport_mgr.list_disc_rport_busy); + spin_unlock_irqrestore( + &v_port_disc->rport_busy_pool_lock, flag); + } else { + disc_rport = NULL; + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, + flag); + } + + /* NOTE: return */ + return disc_rport; + } + + /* 2. UNF_PORT_TYPE_FC (rport_pool): Get from list_rports_pool */ + spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag); + if (!list_empty(&rport_pool->list_rports_pool)) { + /* Get & delete from R_Port free Pool */ + list_head = (&rport_pool->list_rports_pool)->next; + list_del_init(list_head); + rport_pool->rport_pool_count--; + rport = list_entry(list_head, struct unf_rport_s, entry_rport); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) RPort pool is empty", + lport->port_id, lport->nport_id); + + spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, + flag); + + /* NOTE: return */ + return NULL; + } + spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flag); + + /* 3. Alloc (& set bit) R_Port index */ + if (unf_alloc_index_for_rport(lport, rport) != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) allocate index for new RPort failed", + lport->nport_id); + + /* Alloc failed: Add R_Port back to R_Port Pool */ + spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag); + list_add_tail(&rport->entry_rport, + &rport_pool->list_rports_pool); + rport_pool->rport_pool_count++; + spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, + flag); + + unf_check_rport_pool_status(lport); + return NULL; + } + + /* 4. 
Add R_Port to busy list */ + rport = unf_add_rport_to_busy_list(v_lport, rport, v_nport_id); + UNF_REFERNCE_VAR(rport); + + return (void *)rport; +} + +static void unf_reset_rport_attribute(struct unf_rport_s *v_rport) +{ + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3070, 1, v_rport, return); + + spin_lock_irqsave(&v_rport->rport_state_lock, flag); + v_rport->pfn_unf_rport_call_back = NULL; + v_rport->lport = NULL; + v_rport->node_name = INVALID_VALUE64; + v_rport->port_name = INVALID_WWPN; + v_rport->nport_id = INVALID_VALUE32; + v_rport->local_nport_id = INVALID_VALUE32; + v_rport->max_frame_size = UNF_MAX_FRAME_SIZE; + v_rport->ed_tov = UNF_DEFAULT_EDTOV; + v_rport->ra_tov = UNF_DEFAULT_RATOV; + v_rport->rport_index = INVALID_VALUE32; + v_rport->scsi_id = INVALID_VALUE32; + v_rport->rport_alloc_jifs = INVALID_VALUE64; + + /* ini or tgt */ + v_rport->options = 0; + + /* fcp conf */ + v_rport->fcp_conf_needed = UNF_FALSE; + + /* special req retry times */ + v_rport->retries = 0; + v_rport->logo_retries = 0; + + /* special req retry times */ + v_rport->mas_retries = UNF_MAX_RETRY_COUNT; + + /* for target mode */ + v_rport->session = NULL; + v_rport->last_lport_ini_state = UNF_PORT_STATE_LINKDOWN; + v_rport->lport_ini_state = UNF_PORT_STATE_LINKDOWN; + v_rport->rp_state = UNF_RPORT_ST_INIT; + v_rport->last_lport_tgt_state = UNF_PORT_STATE_LINKDOWN; + v_rport->lport_tgt_state = UNF_PORT_STATE_LINKDOWN; + v_rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; + v_rport->disc_done = 0; + + /* for scsi */ + v_rport->data_thread = NULL; + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); +} + +static unsigned int unf_rport_remove(void *v_rport) +{ + /* remove_old_rport/... --->>> rport_ref_dec --->>> rport_remove */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_rport_pool_s *rport_pool = NULL; + unsigned long flag = 0; + unsigned int rport_index = 0; + + UNF_CHECK_VALID(0x3050, UNF_TRUE, + v_rport, return UNF_RETURN_ERROR); + + rport = (struct unf_rport_s *)v_rport; + lport = rport->lport; + UNF_CHECK_VALID(0x3051, UNF_TRUE, + lport, return UNF_RETURN_ERROR); + rport_pool = &((struct unf_lport_s *)lport->root_lport)->rport_pool; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Remove RPort(0x%p) with remote_nport_id(0x%x) local_nport_id(0x%x)", + rport, rport->nport_id, rport->local_nport_id); + + /* 1. Terminate open exchange before rport remove: set ABORT tag */ + unf_cm_xchg_mgr_abort_io_by_id(lport, rport, + rport->nport_id, lport->nport_id, 0); + + /* 2. Abort sfp exchange before rport remove */ + unf_cm_xchg_mgr_abort_sfs_by_id(lport, rport, + rport->nport_id, lport->nport_id); + + /* 3. 
Release R_Port resource: session reset/delete */ + (void)unf_release_rport_res(lport, rport); + + /* 4.1 Delete R_Port from disc destroy/delete list */ + spin_lock_irqsave(&lport->disc.rport_busy_pool_lock, flag); + list_del_init(&rport->entry_rport); + spin_unlock_irqrestore(&lport->disc.rport_busy_pool_lock, flag); + + rport_index = rport->rport_index; /* according to bitmap */ + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[event]Port(0x%x) release RPort(0x%x_%p) with index(0x%x)", + lport->port_id, rport->nport_id, rport, rport->rport_index); + + unf_reset_rport_attribute(rport); + + /* 4.2 Add rport to --->>> rport_pool (free pool) & clear bitmap */ + spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag); + if (lport->low_level_func.rport_release_type == + UNF_LOW_LEVEL_RELEASE_RPORT_SYNC) { + clear_bit((int)rport_index, rport_pool->pul_rpi_bitmap); + } + list_add_tail(&rport->entry_rport, &rport_pool->list_rports_pool); + rport_pool->rport_pool_count++; + spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flag); + + unf_check_rport_pool_status((struct unf_lport_s *)lport->root_lport); + up(&rport->task_sema); + + return RETURN_OK; +} + +void unf_rport_ref_dec(struct unf_rport_s *v_rport) +{ + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3011, UNF_TRUE, v_rport, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Rport(0x%x) reference count is %d", + v_rport->nport_id, atomic_read(&v_rport->rport_ref_cnt)); + + spin_lock_irqsave(&v_rport->rport_state_lock, flag); + if (atomic_dec_and_test(&v_rport->rport_ref_cnt)) { + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + (void)unf_rport_remove(v_rport); + } else { + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + } +} + +static enum unf_rport_login_state_e unf_rport_stat_init( + enum unf_rport_login_state_e v_old_state, + enum unf_rport_event_e v_event) +{ + enum unf_rport_login_state_e en_next_state = UNF_RPORT_ST_INIT; + + switch (v_event) { + case UNF_EVENT_RPORT_LOGO: /* LOGO --->>> LOGO */ + en_next_state = UNF_RPORT_ST_LOGO; + break; + + case UNF_EVENT_RPORT_ENTER_PLOGI: /* PLOGI --->>> PLOGI_WAIT */ + en_next_state = UNF_RPORT_ST_PLOGI_WAIT; + break; + + case UNF_EVENT_RPORT_LINK_DOWN: /* Link Down --->>> Closing */ + en_next_state = UNF_RPORT_ST_CLOSING; + break; + + default: + en_next_state = v_old_state; + break; + } + + return en_next_state; +} + +static enum unf_rport_login_state_e unf_rport_stat_plogi_wait( + enum unf_rport_login_state_e v_old_state, + enum unf_rport_event_e v_event) +{ + enum unf_rport_login_state_e en_next_state = UNF_RPORT_ST_INIT; + + switch (v_event) { + case UNF_EVENT_RPORT_ENTER_PRLI: /* PRLI --->>> PRLI_WAIT */ + en_next_state = UNF_RPORT_ST_PRLI_WAIT; + break; + + case UNF_EVENT_RPORT_LINK_DOWN: /* Link Down --->>> closing */ + en_next_state = UNF_RPORT_ST_CLOSING; + break; + + case UNF_EVENT_RPORT_LOGO: /* LOGO --->>> LOGO */ + en_next_state = UNF_RPORT_ST_LOGO; + break; + + case UNF_EVENT_RPORT_RECOVERY: /* Recovery --->>> Ready */ + en_next_state = UNF_RPORT_ST_READY; + break; + + default: + en_next_state = v_old_state; + break; + } + + return en_next_state; +} + +static enum unf_rport_login_state_e unf_rport_stat_prli_wait( + enum unf_rport_login_state_e v_old_state, + enum unf_rport_event_e v_event) +{ + enum unf_rport_login_state_e en_next_state = UNF_RPORT_ST_INIT; + + switch (v_event) { + case UNF_EVENT_RPORT_READY: /* Ready --->>> Ready */ + en_next_state = UNF_RPORT_ST_READY; + break; + + case 
UNF_EVENT_RPORT_LOGO: /* LOGO --->>> LOGO */ + en_next_state = UNF_RPORT_ST_LOGO; + break; + + case UNF_EVENT_RPORT_LINK_DOWN: /* Link Down --->>> Closing */ + en_next_state = UNF_RPORT_ST_CLOSING; + break; + + case UNF_EVENT_RPORT_RECOVERY: /* Recovery --->>> Ready */ + en_next_state = UNF_RPORT_ST_READY; + break; + + default: + en_next_state = v_old_state; + break; + } + + return en_next_state; +} + +static enum unf_rport_login_state_e unf_rport_stat_ready( + enum unf_rport_login_state_e v_old_state, + enum unf_rport_event_e v_event) +{ + enum unf_rport_login_state_e en_next_state = UNF_RPORT_ST_INIT; + + switch (v_event) { + case UNF_EVENT_RPORT_LOGO: /* LOGO --->>> LOGO */ + en_next_state = UNF_RPORT_ST_LOGO; + break; + + case UNF_EVENT_RPORT_LINK_DOWN: /* Link Down --->>> closing */ + en_next_state = UNF_RPORT_ST_CLOSING; + break; + + case UNF_EVENT_RPORT_ENTER_PLOGI: /* ready --->>> plogi_wait */ + en_next_state = UNF_RPORT_ST_PLOGI_WAIT; + break; + + default: + en_next_state = v_old_state; + break; + } + + return en_next_state; +} + +static enum unf_rport_login_state_e unf_rport_stat_closing( + enum unf_rport_login_state_e v_old_state, + enum unf_rport_event_e v_event) +{ + enum unf_rport_login_state_e en_next_state = UNF_RPORT_ST_INIT; + + switch (v_event) { + case UNF_EVENT_RPORT_CLS_TIMEOUT: /* timeout --->>> delete */ + en_next_state = UNF_RPORT_ST_DELETE; + break; + + case UNF_EVENT_RPORT_RELOGIN: /* relogin --->>> INIT */ + en_next_state = UNF_RPORT_ST_INIT; + break; + + case UNF_EVENT_RPORT_RECOVERY: /* recovery --->>> ready */ + en_next_state = UNF_RPORT_ST_READY; + break; + + default: + en_next_state = v_old_state; + break; + } + + return en_next_state; +} + +static enum unf_rport_login_state_e unf_rport_stat_logo( + enum unf_rport_login_state_e v_old_state, + enum unf_rport_event_e v_event) +{ + enum unf_rport_login_state_e en_next_state = UNF_RPORT_ST_INIT; + + switch (v_event) { + case UNF_EVENT_RPORT_NORMAL_ENTER: /* normal enter --->>> closing */ + en_next_state = UNF_RPORT_ST_CLOSING; + break; + + case UNF_EVENT_RPORT_RECOVERY: /* recovery --->>> ready */ + en_next_state = UNF_RPORT_ST_READY; + break; + + default: + en_next_state = v_old_state; + break; + } + + return en_next_state; +} + +void unf_rport_state_ma(struct unf_rport_s *v_rport, + enum unf_rport_event_e v_event) +{ + enum unf_rport_login_state_e en_old_state = UNF_RPORT_ST_INIT; + enum unf_rport_login_state_e en_next_state = UNF_RPORT_ST_INIT; + + UNF_CHECK_VALID(0x3056, UNF_TRUE, v_rport, return); + + en_old_state = v_rport->rp_state; + + switch (v_rport->rp_state) { + /* State INIT */ + case UNF_RPORT_ST_INIT: + en_next_state = unf_rport_stat_init(en_old_state, v_event); + break; + + /* State PLOGI Wait */ + case UNF_RPORT_ST_PLOGI_WAIT: + en_next_state = unf_rport_stat_plogi_wait(en_old_state, + v_event); + break; + + /* State PRLI Wait */ + case UNF_RPORT_ST_PRLI_WAIT: + en_next_state = unf_rport_stat_prli_wait(en_old_state, + v_event); + break; + + /* State LOGO */ + case UNF_RPORT_ST_LOGO: + en_next_state = unf_rport_stat_logo(en_old_state, v_event); + break; + + /* State CLOSING */ + case UNF_RPORT_ST_CLOSING: + en_next_state = unf_rport_stat_closing(en_old_state, v_event); + break; + + /* State READY */ + case UNF_RPORT_ST_READY: + en_next_state = unf_rport_stat_ready(en_old_state, v_event); + break; + + /* State DELETE */ + case UNF_RPORT_ST_DELETE: + default: + en_next_state = UNF_RPORT_ST_INIT; + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]RPort(0x%x) hold 
state(0x%x)", + v_rport->nport_id, v_rport->rp_state); + break; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MINOR, + "[info]RPort(0x%x) with oldstate(0x%x) event(0x%x) nextstate(0x%x)", + v_rport->nport_id, en_old_state, v_event, en_next_state); + + unf_set_rport_state(v_rport, en_next_state); +} + +void unf_clean_linkdown_rport(struct unf_lport_s *v_lport) +{ + /* for L_Port's R_Port(s) */ + struct unf_disc_s *disc = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + struct unf_rport_s *rport = NULL; + struct unf_lport_s *lport = NULL; + unsigned long disc_lock_flag = 0; + unsigned long rport_lock_flag = 0; + + UNF_CHECK_VALID(0x3058, UNF_TRUE, v_lport, return); + disc = &v_lport->disc; + + /* for each busy R_Port */ + spin_lock_irqsave(&disc->rport_busy_pool_lock, disc_lock_flag); + /* --->>> busy_rports */ + list_for_each_safe(node, next_node, &disc->list_busy_rports) { + rport = list_entry(node, struct unf_rport_s, entry_rport); + + /* 1. Prevent process Repeatly: Closing */ + spin_lock_irqsave(&rport->rport_state_lock, rport_lock_flag); + if (rport->rp_state == UNF_RPORT_ST_CLOSING) { + spin_unlock_irqrestore(&rport->rport_state_lock, + rport_lock_flag); + continue; + } + + /* 2. Increase ref_cnt to protect R_Port */ + if (unf_rport_ref_inc(rport) != RETURN_OK) { + spin_unlock_irqrestore(&rport->rport_state_lock, + rport_lock_flag); + continue; + } + + /* 3. Update R_Port state: + * Link Down Event --->>> closing state + */ + unf_rport_state_ma(rport, UNF_EVENT_RPORT_LINK_DOWN); + + /* 4. Put R_Port from busy to destroy list */ + list_del_init(&rport->entry_rport); + list_add_tail(&rport->entry_rport, &disc->list_destroy_rports); + + lport = rport->lport; + spin_unlock_irqrestore(&rport->rport_state_lock, + rport_lock_flag); + + /* 5. Schedule Closing work (Enqueuing workqueue) */ + unf_schedule_closing_work(lport, rport); + + /* 6. decrease R_Port ref_cnt (company with 2) */ + unf_rport_ref_dec(rport); + } + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_lock_flag); +} + +void unf_rport_enter_closing(struct unf_rport_s *v_rport) +{ + /* + * call by + * 1. with RSCN processer + * 2. with LOGOUT processer + ** + * from + * 1. R_Port Link Down + * 2. R_Port enter LOGO + */ + unsigned long rport_lock_flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_lport_s *lport = NULL; + struct unf_disc_s *disc = NULL; + + UNF_CHECK_VALID(0x3059, UNF_TRUE, v_rport, return); + + /* 1. Increase ref_cnt to protect R_Port */ + spin_lock_irqsave(&v_rport->rport_state_lock, rport_lock_flag); + ret = unf_rport_ref_inc(v_rport); + if (ret != RETURN_OK) { + spin_unlock_irqrestore(&v_rport->rport_state_lock, + rport_lock_flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]RPort(0x%x_0x%p) is removing and no need process", + v_rport->nport_id, v_rport); + + return; + } + + /* NOTE: R_Port state has been set(with closing) */ + + lport = v_rport->lport; + spin_unlock_irqrestore(&v_rport->rport_state_lock, rport_lock_flag); + + /* 2. Put R_Port from busy to destroy list */ + disc = &lport->disc; + spin_lock_irqsave(&disc->rport_busy_pool_lock, rport_lock_flag); + list_del_init(&v_rport->entry_rport); + list_add_tail(&v_rport->entry_rport, &disc->list_destroy_rports); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, rport_lock_flag); + + /* 3. Schedule Closing work (Enqueuing workqueue) */ + unf_schedule_closing_work(lport, v_rport); + + /* 4. 
dec R_Port ref_cnt */ + unf_rport_ref_dec(v_rport); +} + +void unf_rport_error_recovery(struct unf_rport_s *v_rport) +{ + unsigned long delay = 0; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3060, UNF_TRUE, v_rport, return); + + spin_lock_irqsave(&v_rport->rport_state_lock, flag); + + ret = unf_rport_ref_inc(v_rport); + if (ret != RETURN_OK) { + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]RPort(0x%x_0x%p) is removing and no need process", + v_rport->nport_id, v_rport); + return; + } + + /* Check R_Port state */ + if ((v_rport->rp_state == UNF_RPORT_ST_CLOSING) || + (v_rport->rp_state == UNF_RPORT_ST_DELETE)) { + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]RPort(0x%x_0x%p) offline and no need process", + v_rport->nport_id, v_rport); + + unf_rport_ref_dec(v_rport); + return; + } + + /* Check repeatability with recovery work */ + if (delayed_work_pending(&v_rport->recovery_work)) { + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]RPort(0x%x_0x%p) recovery work is running and no need process", + v_rport->nport_id, v_rport); + + unf_rport_ref_dec(v_rport); + return; + } + + /* NOTE: Re-login or Logout directly (recovery work) */ + if (v_rport->retries < v_rport->mas_retries) { + v_rport->retries++; + delay = (unsigned long)v_rport->ed_tov; + + if (queue_delayed_work(unf_work_queue, + &v_rport->recovery_work, + (unsigned long)msecs_to_jiffies( + (unsigned int)delay))) { + /* Inc ref_cnt: corresponding to this work timer */ + (void)unf_rport_ref_inc(v_rport); + } + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]RPort(0x%x_0x%p) state(0x%x) retry login failed", + v_rport->nport_id, v_rport, v_rport->rp_state); + + /* Update R_Port state: LOGO event --->>> ST_LOGO */ + unf_rport_state_ma(v_rport, UNF_EVENT_RPORT_LOGO); + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + + /* Enter LOGO processer */ + unf_rport_enter_logo(v_rport->lport, v_rport); + } + + unf_rport_ref_dec(v_rport); +} + +static unsigned int unf_rport_reuse_only(struct unf_rport_s *v_rport) +{ + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3061, UNF_TRUE, + v_rport, return UNF_RETURN_ERROR); + + spin_lock_irqsave(&v_rport->rport_state_lock, flag); + ret = unf_rport_ref_inc(v_rport); + if (ret != RETURN_OK) { + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + + /* R_Port with delete state */ + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]RPort(0x%x_0x%p) is removing and no need process", + v_rport->nport_id, v_rport); + + return UNF_RETURN_ERROR; + } + + /* R_Port State check: delete */ + if ((v_rport->rp_state == UNF_RPORT_ST_DELETE) || + (v_rport->rp_state == UNF_RPORT_ST_CLOSING)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]RPort(0x%x_0x%p) state(0x%x) is delete or closing no need process", + v_rport->nport_id, v_rport, v_rport->rp_state); + + ret = UNF_RETURN_ERROR; + } + spin_unlock_irqrestore(&v_rport->rport_state_lock, flag); + + unf_rport_ref_dec(v_rport); + + return ret; +} + +static unsigned int unf_rport_reuse_recover(struct unf_rport_s *v_rport) +{ + unsigned long flags = 0; + unsigned int ret = 
UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3062, UNF_TRUE, + v_rport, return UNF_RETURN_ERROR); + + spin_lock_irqsave(&v_rport->rport_state_lock, flags); + ret = unf_rport_ref_inc(v_rport); + if (ret != RETURN_OK) { + spin_unlock_irqrestore(&v_rport->rport_state_lock, flags); + + /* R_Port with delete state */ + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]RPort(0x%x_0x%p) is removing and no need process", + v_rport->nport_id, v_rport); + + return UNF_RETURN_ERROR; + } + + /* R_Port state check: delete */ + if ((v_rport->rp_state == UNF_RPORT_ST_DELETE) || + (v_rport->rp_state == UNF_RPORT_ST_CLOSING)) { + ret = UNF_RETURN_ERROR; + } + + /* Update R_Port state: recovery --->>> ready */ + unf_rport_state_ma(v_rport, UNF_EVENT_RPORT_RECOVERY); + spin_unlock_irqrestore(&v_rport->rport_state_lock, flags); + + unf_rport_ref_dec(v_rport); + + return ret; +} + +static unsigned int unf_rport_reuse_init(struct unf_rport_s *v_rport) +{ + unsigned long flage = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3063, UNF_TRUE, + v_rport, return UNF_RETURN_ERROR); + + spin_lock_irqsave(&v_rport->rport_state_lock, flage); + ret = unf_rport_ref_inc(v_rport); + if (ret != RETURN_OK) { + spin_unlock_irqrestore(&v_rport->rport_state_lock, flage); + + /* R_Port with delete state */ + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]RPort(0x%x_0x%p) is removing and no need process", + v_rport->nport_id, v_rport); + + return UNF_RETURN_ERROR; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]RPort(0x%x)'s state is 0x%x with use_init flag", + v_rport->nport_id, v_rport->rp_state); + + /* R_Port State check: delete */ + if ((v_rport->rp_state == UNF_RPORT_ST_DELETE) || + (v_rport->rp_state == UNF_RPORT_ST_CLOSING)) { + ret = UNF_RETURN_ERROR; + } else { + /* Update R_Port state: re-enter Init state */ + unf_set_rport_state(v_rport, UNF_RPORT_ST_INIT); + } + spin_unlock_irqrestore(&v_rport->rport_state_lock, flage); + + unf_rport_ref_dec(v_rport); + + return ret; +} + +struct unf_rport_s *unf_get_rport_by_nport_id(struct unf_lport_s *v_lport, + unsigned int nport_id) +{ + struct unf_lport_s *lport = NULL; + struct unf_disc_s *disc = NULL; + struct unf_rport_s *rport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flag = 0; + struct unf_rport_s *find_rport = NULL; + + UNF_CHECK_VALID(0x3048, UNF_TRUE, v_lport, return NULL); + lport = (struct unf_lport_s *)v_lport; + disc = &lport->disc; + + /* for each r_port from rport_busy_list: compare N_Port_ID */ + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + list_for_each_safe(node, next_node, &disc->list_busy_rports) { + rport = list_entry(node, struct unf_rport_s, entry_rport); + if (rport && rport->nport_id == nport_id) { + find_rport = rport; + break; + } + } + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + return find_rport; +} + +struct unf_rport_s *unf_get_safe_rport(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + enum unf_rport_reuse_flag_e v_reuse_flag, + unsigned int v_nport_id) +{ + /* + * New add or plug + * + * retry_flogi --->>> reuse_only + * name_server_register --->>> reuse_only + * SNS_plogi --->>> reuse_only + * enter_flogi --->>> reuse_only + * logout --->>> reuse_only + * flogi_handler --->>> reuse_only + * plogi_handler --->>> reuse_only + * adisc_handler --->>> reuse_recovery + * logout_handler --->>> reuse_init + * prlo_handler --->>> reuse_init + * login_with_loop --->>> 
reuse_only + * gffid_callback --->>> reuse_only + * delay_plogi --->>> reuse_only + * gffid_rjt --->>> reuse_only + * gffid_rsp_unknown --->>> reuse_only + * gpnid_acc --->>> reuse_init + * fdisc_callback --->>> reuse_only + * flogi_acc --->>> reuse_only + * plogi_acc --->>> reuse_only + * logo_callback --->>> reuse_init + * rffid_callback --->>> reuse_only + */ +#define UNF_AVOID_LINK_FLASH_TIME 3000 + + struct unf_rport_s *rport = v_rport; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3075, UNF_TRUE, v_lport, return NULL); + + /* 1. Alloc New R_Port or Update R_Port Property */ + if (!rport) { + /* If NULL, get/Alloc new node + * (R_Port from R_Port pool) directly + */ + rport = unf_rport_get_free_and_init(v_lport, UNF_PORT_TYPE_FC, + v_nport_id); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) get exist RPort(0x%x) with state(0x%x) and reuse_flag(0x%x)", + v_lport->port_id, rport->nport_id, + rport->rp_state, v_reuse_flag); + + switch (v_reuse_flag) { + case UNF_RPORT_REUSE_ONLY: + ret = unf_rport_reuse_only(rport); + if (ret != RETURN_OK) { + /* R_Port within delete list: need get new */ + rport = unf_rport_get_free_and_init( + v_lport, + UNF_PORT_TYPE_FC, + v_nport_id); + } + break; + + case UNF_RPORT_REUSE_INIT: + ret = unf_rport_reuse_init(rport); + if (ret != RETURN_OK) { + /* R_Port within delete list: need get new */ + rport = unf_rport_get_free_and_init( + v_lport, + UNF_PORT_TYPE_FC, + v_nport_id); + } + break; + + case UNF_RPORT_REUSE_RECOVER: + ret = unf_rport_reuse_recover(rport); + if (ret != RETURN_OK) { + /* R_Port within delete list, + * NOTE: do nothing + */ + rport = NULL; + } + break; + + default: + break; + } + } + + return rport; +} + +unsigned int unf_get_port_feature(unsigned long long v_wwpn) +{ + struct unf_rport_feature_recard_s *port_fea = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flags = 0; + struct list_head list_temp_node; + + spin_lock_irqsave(&port_fea_pool->port_fea_pool_lock, flags); + list_for_each_safe(node, next_node, &port_fea_pool->list_busy_head) { + port_fea = list_entry(node, struct unf_rport_feature_recard_s, + entry_feature); + + if (v_wwpn == port_fea->wwpn) { + list_del(&port_fea->entry_feature); + list_add(&port_fea->entry_feature, + &port_fea_pool->list_busy_head); + + spin_unlock_irqrestore( + &port_fea_pool->port_fea_pool_lock, flags); + + return port_fea->port_feature; + } + } + + list_for_each_safe(node, next_node, &port_fea_pool->list_free_head) { + port_fea = list_entry(node, struct unf_rport_feature_recard_s, + entry_feature); + + if (v_wwpn == port_fea->wwpn) { + list_del(&port_fea->entry_feature); + list_add(&port_fea->entry_feature, + &port_fea_pool->list_busy_head); + + spin_unlock_irqrestore( + &port_fea_pool->port_fea_pool_lock, flags); + + return port_fea->port_feature; + } + } + + /* can't find wwpn */ + if (list_empty(&port_fea_pool->list_free_head)) { + /* free is empty, transport busy to free */ + list_temp_node = port_fea_pool->list_free_head; + port_fea_pool->list_free_head = port_fea_pool->list_busy_head; + port_fea_pool->list_busy_head = list_temp_node; + } + + port_fea = list_entry((&port_fea_pool->list_free_head)->prev, + struct unf_rport_feature_recard_s, + entry_feature); + list_del(&port_fea->entry_feature); + list_add(&port_fea->entry_feature, &port_fea_pool->list_busy_head); + + port_fea->wwpn = v_wwpn; + port_fea->port_feature = UNF_PORT_MODE_UNKNOWN; + + 
spin_unlock_irqrestore(&port_fea_pool->port_fea_pool_lock, flags); + return UNF_PORT_MODE_UNKNOWN; +} + +void unf_update_port_feature(unsigned long long v_wwpn, + unsigned int v_port_feature) +{ + struct unf_rport_feature_recard_s *port_fea = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flags = 0; + + spin_lock_irqsave(&port_fea_pool->port_fea_pool_lock, flags); + list_for_each_safe(node, next_node, &port_fea_pool->list_busy_head) { + port_fea = list_entry(node, + struct unf_rport_feature_recard_s, + entry_feature); + + if (v_wwpn == port_fea->wwpn) { + port_fea->port_feature = v_port_feature; + list_del(&port_fea->entry_feature); + list_add(&port_fea->entry_feature, + &port_fea_pool->list_busy_head); + + spin_unlock_irqrestore( + &port_fea_pool->port_fea_pool_lock, flags); + + return; + } + } + + list_for_each_safe(node, next_node, &port_fea_pool->list_free_head) { + port_fea = list_entry(node, struct unf_rport_feature_recard_s, + entry_feature); + + if (v_wwpn == port_fea->wwpn) { + port_fea->port_feature = v_port_feature; + list_del(&port_fea->entry_feature); + list_add(&port_fea->entry_feature, + &port_fea_pool->list_busy_head); + + spin_unlock_irqrestore( + &port_fea_pool->port_fea_pool_lock, flags); + + return; + } + } + + spin_unlock_irqrestore(&port_fea_pool->port_fea_pool_lock, flags); +} diff --git a/drivers/scsi/huawei/hifc/unf_rport.h b/drivers/scsi/huawei/hifc/unf_rport.h new file mode 100644 index 000000000000..5e1e6551b94a --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_rport.h @@ -0,0 +1,285 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __UNF_RPORT_H +#define __UNF_RPORT_H + +#define UNF_MAX_SCSI_ID 2048 +#define UNF_LOSE_TMO 30 +#define UNF_RPORT_INVALID_INDEX 0xffff + +/* RSCN compare DISC list with local RPort macro */ +#define UNF_RPORT_NEED_PROCESS 0x1 +#define UNF_RPORT_ONLY_IN_DISC_PROCESS 0x2 +#define UNF_RPORT_ONLY_IN_LOCAL_PROCESS 0x3 +#define UNF_RPORT_IN_DISC_AND_LOCAL_PROCESS 0x4 +#define UNF_RPORT_NOT_NEED_PROCESS 0x5 + +#define UNF_ECHO_SEND_MAX_TIMES 1 + +extern struct unf_rport_feature_pool_s *port_fea_pool; + +enum unf_rport_login_state_e { + UNF_RPORT_ST_INIT = 0x1000, /* initialized */ + UNF_RPORT_ST_PLOGI_WAIT, /* waiting for PLOGI completion */ + UNF_RPORT_ST_PRLI_WAIT, /* waiting for PRLI completion */ + UNF_RPORT_ST_READY, /* ready for use */ + UNF_RPORT_ST_LOGO, /* port logout sent */ + UNF_RPORT_ST_CLOSING, /* being closed */ + UNF_RPORT_ST_DELETE, /* port being deleted */ + UNF_RPORT_ST_BUTT +}; + +enum unf_rport_event_e { + UNF_EVENT_RPORT_NORMAL_ENTER = 0x9000, + UNF_EVENT_RPORT_ENTER_PLOGI = 0x9001, + UNF_EVENT_RPORT_ENTER_PRLI = 0x9002, + UNF_EVENT_RPORT_READY = 0x9003, + UNF_EVENT_RPORT_LOGO = 0x9004, + UNF_EVENT_RPORT_CLS_TIMEOUT = 0x9005, + UNF_EVENT_RPORT_RECOVERY = 0x9006, + UNF_EVENT_RPORT_RELOGIN = 0x9007, + UNF_EVENT_RPORT_LINK_DOWN = 0x9008, + UNF_EVENT_RPORT_BUTT +}; + +/* RPort local link state */ +enum unf_port_state_e { + UNF_PORT_STATE_LINKUP = 0x1001, + UNF_PORT_STATE_LINKDOWN = 0x1002 +}; + +enum unf_rport_reuse_flag_e { + UNF_RPORT_REUSE_ONLY = 0x1001, + UNF_RPORT_REUSE_INIT = 0x1002, + UNF_RPORT_REUSE_RECOVER = 0x1003 +}; + +struct unf_disc_rport_s { + /* RPort entry */ + struct list_head entry_rport; + + unsigned int nport_id; /* Remote port NPortID */ + unsigned int disc_done; /* 1:Disc done */ +}; + +struct unf_rport_feature_pool_s { + struct list_head list_busy_head; + 
struct list_head list_free_head; + void *p_port_feature_pool_addr; + spinlock_t port_fea_pool_lock; +}; + +struct unf_rport_feature_recard_s { + struct list_head entry_feature; + unsigned long long wwpn; + unsigned int port_feature; + unsigned int reserved; +}; + +struct unf_os_thread_private_data_s { + struct list_head list; + spinlock_t spin_lock; + struct task_struct *thread; + unsigned int in_process; + unsigned int cpu_id; + atomic_t user_count; +}; + +/* Remote Port struct */ +struct unf_rport_s { + unsigned int max_frame_size; + unsigned int supported_classes; + + /* Dynamic Attributes */ + /* Remote Port loss timeout in seconds. */ + unsigned int dev_loss_tmo; + + unsigned long long node_name; + unsigned long long port_name; + unsigned int nport_id; /* Remote port NPortID */ + unsigned int local_nport_id; + + unsigned int roles; + + /* Remote port local INI state */ + enum unf_port_state_e lport_ini_state; + enum unf_port_state_e last_lport_ini_state; + + /* Remote port local TGT state */ + enum unf_port_state_e lport_tgt_state; + enum unf_port_state_e last_lport_tgt_state; + + /* Port Type:fc */ + unsigned int port_type; + + /* RPort reference counter */ + atomic_t rport_ref_cnt; + + /* Pending IO count */ + atomic_t pending_io_cnt; + + /* RPort entry */ + struct list_head entry_rport; + + /* Port State,delay reclaim when uiRpState == complete. */ + enum unf_rport_login_state_e rp_state; + unsigned int disc_done; /* 1:Disc done */ + + struct unf_lport_s *lport; + void *rport; + spinlock_t rport_state_lock; + + /* Port attribution */ + unsigned int ed_tov; + unsigned int ra_tov; + unsigned int options; /* ini or tgt */ + unsigned int last_report_linkup_options; + unsigned int fcp_conf_needed; /* INI Rport send FCP CONF flag */ + unsigned int tape_support_needed; /* INI tape support flag */ + unsigned int retries; /* special req retry times */ + unsigned int logo_retries; /* logo error recovery retry times */ + unsigned int mas_retries; /* special req retry times */ + /* Rport alloc jiffies */ + unsigned long long rport_alloc_jifs; + + void *session; + + /* binding with SCSI */ + unsigned int scsi_id; + + /* disc list compare flag */ + unsigned int rscn_position; + + unsigned int rport_index; + + /* RPort timer,closing status */ + struct work_struct closing_work; + + /* RPort timer,rport linkup */ + struct work_struct start_work; + + /* RPort timer,recovery */ + struct delayed_work recovery_work; + + /* RPort timer,TGT mode,PRLI waiting */ + struct delayed_work open_work; + + struct semaphore task_sema; + /* Callback after rport Ready/delete.[with state:ok/fail]. 
+ * Creat/free TGT session here + * input : L_Port,R_Port,state:ready + * --creat session/delete--free session + */ + void (*pfn_unf_rport_call_back)(void *, void *, unsigned int); + + struct unf_os_thread_private_data_s *data_thread; +}; + +#define UNF_IO_RESULT_CNT(v_scsi_table, v_scsi_id, v_io_result) \ + do { \ + if (likely(((v_io_result) < UNF_MAX_IO_RETURN_VALUE) && \ + ((v_scsi_id) < UNF_MAX_SCSI_ID) && \ + ((v_scsi_table)->wwn_rport_info_table) && \ + (v_scsi_table->wwn_rport_info_table[v_scsi_id].dfx_counter))) { \ + atomic64_inc(&v_scsi_table->wwn_rport_info_table[v_scsi_id].dfx_counter->io_done_cnt[v_io_result]); \ + } else { \ + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, \ + UNF_LOG_EQUIP_ATT, UNF_ERR, \ + "[err] io return value(0x%x) or scsi_id(0x%x) is invalid", \ + v_io_result, v_scsi_id); \ + } \ + } while (0) + +#define UNF_SCSI_CMD_CNT(v_scsi_table, v_scsi_id, v_io_type) \ + do { \ + if (likely(((v_io_type) < UNF_MAX_SCSI_CMD) && \ + ((v_scsi_id) < UNF_MAX_SCSI_ID) && \ + ((v_scsi_table)->wwn_rport_info_table) && \ + (v_scsi_table->wwn_rport_info_table[v_scsi_id].dfx_counter))) { \ + atomic64_inc(&((v_scsi_table->wwn_rport_info_table[v_scsi_id]).dfx_counter->scsi_cmd_cnt[v_io_type])); \ + } else { \ + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, \ + UNF_LOG_EQUIP_ATT, UNF_ERR, \ + "[err] scsi_cmd(0x%x) or scsi_id(0x%x) is invalid", \ + v_io_type, v_scsi_id); \ + } \ + } while (0) + +#define UNF_SCSI_ERROR_HANDLE_CNT(v_scsi_table, v_scsi_id, v_io_type) \ + do { \ + if (likely(((v_io_type) < UNF_SCSI_ERROR_HANDLE_BUTT) && \ + ((v_scsi_id) < UNF_MAX_SCSI_ID) && \ + ((v_scsi_table)->wwn_rport_info_table) && \ + (v_scsi_table->wwn_rport_info_table[v_scsi_id].dfx_counter))) { \ + atomic_inc(&v_scsi_table->wwn_rport_info_table[v_scsi_id].dfx_counter->error_handle[v_io_type]); \ + } else { \ + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, \ + UNF_LOG_EQUIP_ATT, UNF_ERR, \ + "[err] scsi_cmd(0x%x) or scsi_id(0x%x) is invalid", \ + v_io_type, v_scsi_id); \ + } \ + } while (0) + +#define UNF_SCSI_ERROR_HANDLE_RESULT_CNT(v_scsi_table, v_scsi_id, v_io_type) \ + do { \ + if (likely(((v_io_type) < UNF_SCSI_ERROR_HANDLE_BUTT) && \ + ((v_scsi_id) < UNF_MAX_SCSI_ID) && \ + ((v_scsi_table)->wwn_rport_info_table) && \ + (v_scsi_table->wwn_rport_info_table[v_scsi_id].dfx_counter))) { \ + atomic_inc(&v_scsi_table->wwn_rport_info_table[v_scsi_id].dfx_counter->error_handle_result[v_io_type]); \ + } else { \ + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, \ + UNF_LOG_EQUIP_ATT, UNF_ERR, \ + "[err] scsi_cmd(0x%x) or scsi_id(0x%x) is invalid", \ + v_io_type, v_scsi_id); \ + } \ + } while (0) + +void unf_rport_state_ma(struct unf_rport_s *v_rport, + enum unf_rport_event_e v_event); +void unf_update_lport_state_by_linkup_event(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int rport_att); +void unf_rport_enter_closing(struct unf_rport_s *v_rport); +void unf_clean_linkdown_rport(struct unf_lport_s *v_lport); +void unf_rport_error_recovery(struct unf_rport_s *v_rport); +struct unf_rport_s *unf_get_rport_by_nport_id(struct unf_lport_s *v_lport, + unsigned int nport_id); +void unf_rport_enter_logo(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_rport_ref_inc(struct unf_rport_s *v_rport); +void unf_rport_ref_dec(struct unf_rport_s *v_rport); + +struct unf_rport_s *unf_rport_set_qualifier_key_reuse( + struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport_by_nport_id, + struct unf_rport_s *v_rport_by_wwpn, + unsigned long long v_wwpn, + unsigned int v_sid); +void unf_rport_delay_login(struct 
unf_rport_s *v_rport); +struct unf_rport_s *unf_find_valid_rport(struct unf_lport_s *v_lport, + unsigned long long v_wwpn, + unsigned int v_sid); +void unf_rport_linkdown(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +struct unf_rport_s *unf_get_safe_rport(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + enum unf_rport_reuse_flag_e v_reuse_flag, + unsigned int v_nport_id); +void *unf_rport_get_free_and_init(void *v_lport, + unsigned int v_port_type, + unsigned int v_nport_id); +unsigned int unf_free_scsi_id(struct unf_lport_s *v_lport, + unsigned int v_scsi_id); +void unf_schedule_closing_work(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +void unf_sesion_loss_timeout(struct work_struct *v_work); +unsigned int unf_get_port_feature(unsigned long long v_wwpn); +void unf_update_port_feature(unsigned long long v_wwpn, + unsigned int v_port_feature); + +#endif +
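
For reference, the R_Port login state machine driven by unf_rport_state_ma() and the unf_rport_stat_*() helpers in this patch can be summarized with the short, self-contained userspace sketch below. The state and event values mirror the unf_rport_login_state_e and unf_rport_event_e enums in unf_rport.h; everything else (the next_state() helper, the sample event path, the printed trace) is illustrative only and is not part of the driver.

/* Minimal sketch of the R_Port login state machine implemented by
 * unf_rport_state_ma()/unf_rport_stat_*() in unf_rport.c.  State and
 * event values follow unf_rport.h; names below are illustrative only.
 */
#include <stdio.h>

enum login_state {
	ST_INIT = 0x1000,
	ST_PLOGI_WAIT,
	ST_PRLI_WAIT,
	ST_READY,
	ST_LOGO,
	ST_CLOSING,
	ST_DELETE,
};

enum login_event {
	EV_NORMAL_ENTER = 0x9000,
	EV_ENTER_PLOGI,
	EV_ENTER_PRLI,
	EV_READY,
	EV_LOGO,
	EV_CLS_TIMEOUT,
	EV_RECOVERY,
	EV_RELOGIN,
	EV_LINK_DOWN,
};

/* Unhandled events hold the current state; DELETE (and any unknown
 * state) is funneled back to INIT, as in the driver's default branch.
 */
static enum login_state next_state(enum login_state cur, enum login_event ev)
{
	switch (cur) {
	case ST_INIT:
		if (ev == EV_ENTER_PLOGI) return ST_PLOGI_WAIT;
		if (ev == EV_LOGO) return ST_LOGO;
		if (ev == EV_LINK_DOWN) return ST_CLOSING;
		return cur;
	case ST_PLOGI_WAIT:
		if (ev == EV_ENTER_PRLI) return ST_PRLI_WAIT;
		if (ev == EV_RECOVERY) return ST_READY;
		if (ev == EV_LOGO) return ST_LOGO;
		if (ev == EV_LINK_DOWN) return ST_CLOSING;
		return cur;
	case ST_PRLI_WAIT:
		if (ev == EV_READY || ev == EV_RECOVERY) return ST_READY;
		if (ev == EV_LOGO) return ST_LOGO;
		if (ev == EV_LINK_DOWN) return ST_CLOSING;
		return cur;
	case ST_READY:
		if (ev == EV_ENTER_PLOGI) return ST_PLOGI_WAIT;
		if (ev == EV_LOGO) return ST_LOGO;
		if (ev == EV_LINK_DOWN) return ST_CLOSING;
		return cur;
	case ST_LOGO:
		if (ev == EV_NORMAL_ENTER) return ST_CLOSING;
		if (ev == EV_RECOVERY) return ST_READY;
		return cur;
	case ST_CLOSING:
		if (ev == EV_CLS_TIMEOUT) return ST_DELETE;
		if (ev == EV_RELOGIN) return ST_INIT;
		if (ev == EV_RECOVERY) return ST_READY;
		return cur;
	default: /* ST_DELETE: reset to INIT ("hold state" in the driver log) */
		return ST_INIT;
	}
}

int main(void)
{
	/* Normal login path, then link loss and closing timeout */
	enum login_event path[] = { EV_ENTER_PLOGI, EV_ENTER_PRLI, EV_READY,
				    EV_LINK_DOWN, EV_CLS_TIMEOUT };
	enum login_state st = ST_INIT;
	unsigned int i;

	for (i = 0; i < sizeof(path) / sizeof(path[0]); i++) {
		st = next_state(st, path[i]);
		printf("event 0x%x -> state 0x%x\n",
		       (unsigned int)path[i], (unsigned int)st);
	}
	return 0;
}

Walking that event path yields INIT -> PLOGI_WAIT -> PRLI_WAIT -> READY -> CLOSING -> DELETE, which matches the lifecycle the patch relies on: unf_rport_enter_closing() moves a port toward CLOSING, and the closing timeout (UNF_EVENT_RPORT_CLS_TIMEOUT) is what finally allows unf_rport_remove() to return the R_Port to the free pool.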