From: Chenguangli <chenguangli2@huawei.com>
driver inclusion
category: feature
bugzilla: NA
-----------------------------------------------------------------------
This module is used to process services related to the FC protocol.
Signed-off-by: Chenguangli <chenguangli2@huawei.com>
Reviewed-by: Zengweiliang <zengweiliang.zengweiliang@huawei.com>
Acked-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 drivers/scsi/Kconfig                    |    8 +-
 drivers/scsi/Makefile                   |    3 +-
 drivers/scsi/huawei/Kconfig             |   21 +
 drivers/scsi/huawei/Makefile            |    5 +
 drivers/scsi/huawei/hifc/Kconfig        |   11 +
 drivers/scsi/huawei/hifc/Makefile       |   42 +
 drivers/scsi/huawei/hifc/hifc_service.c | 3076 +++++++
 drivers/scsi/huawei/hifc/hifc_service.h |  248 +
 drivers/scsi/huawei/hifc/unf_disc.c     | 1320 +++
 drivers/scsi/huawei/hifc/unf_disc.h     |   53 +
 drivers/scsi/huawei/hifc/unf_event.c    |  557 ++
 drivers/scsi/huawei/hifc/unf_event.h    |  101 +
 drivers/scsi/huawei/hifc/unf_exchg.c    | 3632 +++++++
 drivers/scsi/huawei/hifc/unf_exchg.h    |  513 ++
 drivers/scsi/huawei/hifc/unf_service.c  | 9873 +++++++++++++++++++++++
 drivers/scsi/huawei/hifc/unf_service.h  |   83 +
 16 files changed, 19542 insertions(+), 4 deletions(-)
 create mode 100644 drivers/scsi/huawei/Kconfig
 create mode 100644 drivers/scsi/huawei/Makefile
 create mode 100644 drivers/scsi/huawei/hifc/Kconfig
 create mode 100644 drivers/scsi/huawei/hifc/Makefile
 create mode 100644 drivers/scsi/huawei/hifc/hifc_service.c
 create mode 100644 drivers/scsi/huawei/hifc/hifc_service.h
 create mode 100644 drivers/scsi/huawei/hifc/unf_disc.c
 create mode 100644 drivers/scsi/huawei/hifc/unf_disc.h
 create mode 100644 drivers/scsi/huawei/hifc/unf_event.c
 create mode 100644 drivers/scsi/huawei/hifc/unf_event.h
 create mode 100644 drivers/scsi/huawei/hifc/unf_exchg.c
 create mode 100644 drivers/scsi/huawei/hifc/unf_exchg.h
 create mode 100644 drivers/scsi/huawei/hifc/unf_service.c
 create mode 100644 drivers/scsi/huawei/hifc/unf_service.h
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index ec2708f78120..00006841fefc 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -179,7 +179,7 @@ config CHR_DEV_SCH
 	  changers are listed as "Type: Medium Changer" in /proc/scsi/scsi.
 	  If you have such hardware and want to use it with linux, say Y
 	  here. Check <file:Documentation/scsi/scsi-changer.txt> for details.
-	  
+
 	  If you want to compile this as a module ( = code which can be
 	  inserted in and removed from the running kernel whenever you want),
 	  say M here and read <file:Documentation/kbuild/modules.txt> and
@@ -475,7 +475,7 @@ config SCSI_DPT_I2O
 	tristate "Adaptec I2O RAID support "
 	depends on SCSI && PCI && VIRT_TO_BUS
 	help
-	  This driver supports all of Adaptec's I2O based RAID controllers as 
+	  This driver supports all of Adaptec's I2O based RAID controllers as
 	  well as the DPT SmartRaid V cards.  This is an Adaptec maintained
 	  driver by Deanna Bonds.  See <file:Documentation/scsi/dpti.txt>.
 
@@ -638,7 +638,7 @@ config SCSI_GDTH
 	---help---
 	  Formerly called GDT SCSI Disk Array Controller Support.
 
-	  This is a driver for RAID/SCSI Disk Array Controllers (EISA/ISA/PCI) 
+	  This is a driver for RAID/SCSI Disk Array Controllers (EISA/ISA/PCI)
 	  manufactured by Intel Corporation/ICP vortex GmbH. It is documented
 	  in the kernel source in <file:drivers/scsi/gdth.c> and
 	  <file:drivers/scsi/gdth.h>.
@@ -1113,6 +1113,8 @@ source "drivers/scsi/qla4xxx/Kconfig"
 source "drivers/scsi/qedi/Kconfig"
 source "drivers/scsi/qedf/Kconfig"
 
+source "drivers/scsi/huawei/Kconfig"
+
 config SCSI_LPFC
 	tristate "Emulex LightPulse Fibre Channel Support"
 	depends on PCI && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 6d71b2a9592b..2973693f6dcc 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -79,10 +79,11 @@ obj-$(CONFIG_SCSI_IPS)		+= ips.o
 obj-$(CONFIG_SCSI_GENERIC_NCR5380)	+= g_NCR5380.o
 obj-$(CONFIG_SCSI_QLOGIC_FAS)	+= qlogicfas408.o	qlogicfas.o
 obj-$(CONFIG_PCMCIA_QLOGIC)	+= qlogicfas408.o
-obj-$(CONFIG_SCSI_QLOGIC_1280)	+= qla1280.o 
+obj-$(CONFIG_SCSI_QLOGIC_1280)	+= qla1280.o
 obj-$(CONFIG_SCSI_QLA_FC)	+= qla2xxx/
 obj-$(CONFIG_SCSI_QLA_ISCSI)	+= libiscsi.o qla4xxx/
 obj-$(CONFIG_SCSI_LPFC)		+= lpfc/
+obj-$(CONFIG_SCSI_HUAWEI_FC)	+= huawei/
 obj-$(CONFIG_SCSI_BFA_FC)	+= bfa/
 obj-$(CONFIG_SCSI_CHELSIO_FCOE)	+= csiostor/
 obj-$(CONFIG_SCSI_DMX3191D)	+= dmx3191d.o
diff --git a/drivers/scsi/huawei/Kconfig b/drivers/scsi/huawei/Kconfig
new file mode 100644
index 000000000000..a9fbdef9b4b3
--- /dev/null
+++ b/drivers/scsi/huawei/Kconfig
@@ -0,0 +1,21 @@
+#
+# Huawei driver configuration
+#
+
+config SCSI_HUAWEI_FC
+	tristate "Huawei devices"
+	depends on PCI && SCSI
+	depends on SCSI_FC_ATTRS
+	default m
+	---help---
+	  If you have a Fibre Channel PCI card belonging to this class, say Y.
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about Huawei cards. If you say Y, you will be asked
+	  for your specific card in the following questions.
+
+if SCSI_HUAWEI_FC
+
+source "drivers/scsi/huawei/hifc/Kconfig"
+
+endif # SCSI_HUAWEI_FC
diff --git a/drivers/scsi/huawei/Makefile b/drivers/scsi/huawei/Makefile
new file mode 100644
index 000000000000..fa48694cc166
--- /dev/null
+++ b/drivers/scsi/huawei/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Huawei device drivers.
+#
+
+obj-$(CONFIG_SCSI_FC_HIFC) += hifc/
diff --git a/drivers/scsi/huawei/hifc/Kconfig b/drivers/scsi/huawei/hifc/Kconfig
new file mode 100644
index 000000000000..79c7954a0735
--- /dev/null
+++ b/drivers/scsi/huawei/hifc/Kconfig
@@ -0,0 +1,11 @@
+#
+# Huawei driver configuration
+#
+config SCSI_FC_HIFC
+	tristate "Huawei hifc Fibre Channel Support"
+	default m
+	depends on PCI && SCSI
+	depends on SCSI_FC_ATTRS
+	---help---
+	  This driver supports Huawei Fibre Channel PCI and
+	  PCIE host adapters.
diff --git a/drivers/scsi/huawei/hifc/Makefile b/drivers/scsi/huawei/hifc/Makefile
new file mode 100644
index 000000000000..0128086c75d9
--- /dev/null
+++ b/drivers/scsi/huawei/hifc/Makefile
@@ -0,0 +1,42 @@
+obj-$(CONFIG_SCSI_FC_HIFC) += hifc.o
+
+hifc-objs += hifc_utils.o
+hifc-objs += hifc_hba.o
+hifc-objs += hifc_portmng.o
+
+hifc-objs += hifc_module.o
+hifc-objs += hifc_chipitf.o
+hifc-objs += hifc_io.o
+hifc-objs += hifc_queue.o
+hifc-objs += hifc_service.o
+hifc-objs += hifc_wqe.o
+hifc-objs += hifc_cfg.o
+hifc-objs += hifc_lld.o
+
+hifc-objs += unf_io.o
+hifc-objs += unf_io_abnormal.o
+hifc-objs += unf_scsi.o
+hifc-objs += unf_init.o
+hifc-objs += unf_event.o
+hifc-objs += unf_exchg.o
+hifc-objs += unf_lport.o
+hifc-objs += unf_disc.o
+hifc-objs += unf_rport.o
+hifc-objs += unf_service.o
+hifc-objs += unf_portman.o
+hifc-objs += unf_npiv.o
+hifc-objs += hifc_sml.o
+hifc-objs += hifc_tool.o
+hifc-objs += hifc_tool_hw.o
+hifc-objs += hifc_dbgtool_knl.o
+
+hifc-objs += hifc_hwif.o
+hifc-objs += hifc_eqs.o
+hifc-objs += hifc_api_cmd.o
+hifc-objs += hifc_mgmt.o
+hifc-objs += hifc_wq.o
+hifc-objs += hifc_cmdq.o
+hifc-objs += hifc_hwdev.o
+hifc-objs += hifc_cqm_main.o
+hifc-objs += hifc_cqm_object.o
+
diff --git a/drivers/scsi/huawei/hifc/hifc_service.c b/drivers/scsi/huawei/hifc/hifc_service.c
new file mode 100644
index 000000000000..52c9ad7670ee
--- /dev/null
+++ b/drivers/scsi/huawei/hifc/hifc_service.c
@@ -0,0 +1,3076 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei Hifc PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ */
+
+#include "unf_log.h"
+#include "unf_common.h"
+#include "hifc_module.h"
+#include "hifc_service.h"
+#include "hifc_io.h"
+#include "hifc_chipitf.h"
+
+#define HIFC_RQ_ERROR_FRAME 0x100
+#define HIFC_ELS_SRQ_BUF_NUM 0x9
+
+/* Parent SCQ: ELS processing functions */
+static unsigned int hifc_scq_rcv_els_cmd(struct hifc_hba_s *v_hba,
+					 union hifcoe_scqe_u *v_scqe);
+static unsigned int hifc_scq_rcv_els_rsp(struct hifc_hba_s *v_hba,
+					 union hifcoe_scqe_u *v_scqe);
+static unsigned int hifc_scq_rcv_els_rsp_sts(struct hifc_hba_s *v_hba,
+					     union hifcoe_scqe_u *v_scqe);
+
+/* Parent SCQ: GS RSP processing function */
+static unsigned int hifc_scq_rcv_gs_rsp(struct hifc_hba_s *v_hba,
+					union hifcoe_scqe_u *v_scqe);
+
+/* Parent SCQ: BLS RSP processing function */
+static unsigned int hifc_scq_rcv_abts_rsp(struct hifc_hba_s *v_hba,
+					  union hifcoe_scqe_u *v_scqe);
+
+/* Parent SCQ: offload completion processing function */
+static unsigned int hifc_scq_rcv_offload_sts(struct hifc_hba_s *v_hba,
+					     union hifcoe_scqe_u *v_scqe);
+
+/* Parent SCQ: flush sq completion processing function */
+static unsigned int hifc_scq_rcv_flush_sq_sts(struct hifc_hba_s *v_hba,
+					      union hifcoe_scqe_u *v_scqe);
+
+/* Parent SCQ: buffer clear completion processing function */
+static unsigned int hifc_scq_rcv_buf_clear_sts(struct hifc_hba_s *v_hba,
+					       union hifcoe_scqe_u *v_scqe);
+static unsigned int hifc_scq_rcv_sess_rst_sts(struct hifc_hba_s *v_hba,
+					      union hifcoe_scqe_u *v_scqe);
+static unsigned int hifc_scq_rcv_clear_srq_sts(struct hifc_hba_s *v_hba,
+					       union hifcoe_scqe_u *v_scqe);
+static unsigned int hifc_scq_rcv_marker_sts(struct hifc_hba_s *v_hba,
+					    union hifcoe_scqe_u *v_scqe);
+static unsigned int hifc_scq_rcv_abts_marker_sts(struct hifc_hba_s *v_hba,
+						 union hifcoe_scqe_u *v_scqe);
+
+typedef unsigned int (*pfn_scqe_handler)(struct hifc_hba_s *,
+					 union hifcoe_scqe_u *);
+
+struct unf_scqe_handler_table_s {
+	unsigned int scqe_type;	/* SCQE type */
+	int reclaim_sq_wpg;
+	pfn_scqe_handler pfn_scqe_handle_fun;
+};
+
+struct unf_scqe_handler_table_s scqe_handler_table[] = {
+	{ /* INI rcvd ELS_CMND */
+		HIFC_SCQE_ELS_CMND,
+		UNF_FALSE,
+		hifc_scq_rcv_els_cmd
+	},
+	{ /* INI rcvd ELS_RSP */
+		HIFC_SCQE_ELS_RSP,
+		UNF_TRUE,
+		hifc_scq_rcv_els_rsp
+	},
+	{ /* INI rcvd GS_RSP */
+		HIFC_SCQE_GS_RSP,
+		UNF_TRUE,
+		hifc_scq_rcv_gs_rsp
+	},
+	{ /* INI rcvd BLS_RSP */
+		HIFC_SCQE_ABTS_RSP,
+		UNF_TRUE,
+		hifc_scq_rcv_abts_rsp
+	},
+	{ /* INI rcvd FCP RSP */
+		HIFC_SCQE_FCP_IRSP,
+		UNF_TRUE,
+		hifc_scq_recv_iresp
+	},
+	{ /* INI rcvd ELS_RSP STS(Done) */
+		HIFC_SCQE_ELS_RSP_STS,
+		UNF_TRUE,
+		hifc_scq_rcv_els_rsp_sts
+	},
+	{ /* INI rcvd Session enable STS */
+		HIFC_SCQE_SESS_EN_STS,
+		UNF_FALSE,
+		hifc_scq_rcv_offload_sts
+	},
+	{ /* INI rcvd flush (pending) SQ STS */
+		HIFC_SCQE_FLUSH_SQ_STS,
+		UNF_FALSE,
+		hifc_scq_rcv_flush_sq_sts
+	},
+	{ /* INI rcvd Buffer clear STS */
+		HIFC_SCQE_BUF_CLEAR_STS,
+		UNF_FALSE,
+		hifc_scq_rcv_buf_clear_sts
+	},
+	{ /* INI rcvd session reset STS */
+		HIFC_SCQE_SESS_RST_STS,
+		UNF_FALSE,
+		hifc_scq_rcv_sess_rst_sts
+	},
+	{ /* ELS SRQ */
+		HIFC_SCQE_CLEAR_SRQ_STS,
+		UNF_FALSE,
+		hifc_scq_rcv_clear_srq_sts
+	},
+	{ /* INI rcvd TMF RSP */
+		HIFC_SCQE_FCP_ITMF_RSP,
+		UNF_TRUE,
+		hifc_scq_recv_iresp
+	},
+	{ /* INI rcvd TMF Marker STS */
+		HIFC_SCQE_ITMF_MARKER_STS,
+		UNF_FALSE,
+		hifc_scq_rcv_marker_sts
+	},
+	{ /* INI rcvd ABTS Marker STS */
+		HIFC_SCQE_ABTS_MARKER_STS,
+		UNF_FALSE,
+		hifc_scq_rcv_abts_marker_sts
+	}
+};
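+
+/* Dispatch table consumed by hifc_rcv_scqe_entry_from_scq(): each SCQE
+ * completion type is matched against scqe_type and handed to its handler;
+ * reclaim_sq_wpg marks the completions that consumed a parent SQ WQE, so
+ * the SQ WQE page can be reclaimed after the handler returns.
+ */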
+
+static unsigned int hifc_get_els_rps_pld_len(unsigned short type,
+					     unsigned short cmnd,
+					     unsigned int *v_els_acc_pld_len)
+{
+	unsigned int ret = RETURN_OK;
+
+	UNF_CHECK_VALID(0x4917, UNF_TRUE, v_els_acc_pld_len,
+			return UNF_RETURN_ERROR);
+
+	/* RJT */
+	if (type == ELS_RJT) {
+		*v_els_acc_pld_len = UNF_ELS_ACC_RJT_LEN;
+		return RETURN_OK;
+	}
+
+	/* ACC */
+	switch (cmnd) {
+	/* Uses the same payload length as PLOGI */
+	case ELS_FLOGI:
+	case ELS_PDISC:
+	case ELS_PLOGI:
+		*v_els_acc_pld_len = UNF_PLOGI_ACC_PAYLOAD_LEN;
+		break;
+
+	case ELS_PRLI:
+		/* The PRLI ACC payload extends 12 bytes */
+		*v_els_acc_pld_len = UNF_PRLI_ACC_PAYLOAD_LEN -
+				     UNF_PRLI_SIRT_EXTRA_SIZE;
+		break;
+
+	case ELS_LOGO:
+		*v_els_acc_pld_len = UNF_LOGO_ACC_PAYLOAD_LEN;
+		break;
+
+	case ELS_PRLO:
+		*v_els_acc_pld_len = UNF_PRLO_ACC_PAYLOAD_LEN;
+		break;
+
+	case ELS_RSCN:
+		*v_els_acc_pld_len = UNF_RSCN_ACC_PAYLOAD_LEN;
+		break;
+
+	case ELS_ADISC:
+		*v_els_acc_pld_len = UNF_ADISC_ACC_PAYLOAD_LEN;
+		break;
+
+	case ELS_RRQ:
+		*v_els_acc_pld_len = UNF_RRQ_ACC_PAYLOAD_LEN;
+		break;
+
+	case ELS_SCR:
+		*v_els_acc_pld_len = UNF_SCR_RSP_PAYLOAD_LEN;
+		break;
+
+	case ELS_ECHO:
+		*v_els_acc_pld_len = UNF_ECHO_ACC_PAYLOAD_LEN;
+		break;
+	case ELS_RLS:
+		*v_els_acc_pld_len = UNF_RLS_ACC_PAYLOAD_LEN;
+		break;
+	case ELS_REC:
+		*v_els_acc_pld_len = UNF_REC_ACC_PAYLOAD_LEN;
+		break;
+	default:
+		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
+			  "[warn]Unknown ELS command(0x%x)", cmnd);
+		ret = UNF_RETURN_ERROR;
+		break;
+	}
+
+	return ret;
+}
+
+struct hifc_els_cmd_payload_table_s {
+	unsigned short cmnd;	/* ELS type */
+	unsigned int req_pld_len;
+	unsigned int rsp_pld_len;
+};
+
+struct hifc_els_cmd_payload_table_s els_pld_table_map[] = {
+	{ ELS_FDISC,
+	  UNF_FDISC_PAYLOAD_LEN,
+	  UNF_FDISC_ACC_PAYLOAD_LEN
+	},
+	{ ELS_FLOGI,
+	  UNF_FLOGI_PAYLOAD_LEN,
+	  UNF_FLOGI_ACC_PAYLOAD_LEN
+	},
+	{ ELS_PLOGI,
+	  UNF_PLOGI_PAYLOAD_LEN,
+	  UNF_PLOGI_ACC_PAYLOAD_LEN
+	},
+	{ ELS_SCR,
+	  UNF_SCR_PAYLOAD_LEN,
+	  UNF_SCR_RSP_PAYLOAD_LEN
+	},
+	{ ELS_PDISC,
+	  UNF_PDISC_PAYLOAD_LEN,
+	  UNF_PDISC_ACC_PAYLOAD_LEN
+	},
+	{ ELS_LOGO,
+	  UNF_LOGO_PAYLOAD_LEN,
+	  UNF_LOGO_ACC_PAYLOAD_LEN
+	},
+	{ ELS_PRLO,
+	  UNF_PRLO_PAYLOAD_LEN,
+	  UNF_PRLO_ACC_PAYLOAD_LEN
+	},
+	{ ELS_ADISC,
+	  UNF_ADISC_PAYLOAD_LEN,
+	  UNF_ADISC_ACC_PAYLOAD_LEN
+	},
+	{ ELS_RRQ,
+	  UNF_RRQ_PAYLOAD_LEN,
+	  UNF_RRQ_ACC_PAYLOAD_LEN
+	},
+	{ ELS_RSCN,
+	  0,
+	  UNF_RSCN_ACC_PAYLOAD_LEN
+	},
+	{ ELS_ECHO,
+	  UNF_ECHO_PAYLOAD_LEN,
+	  UNF_ECHO_ACC_PAYLOAD_LEN
+	},
+	{ ELS_RLS,
+	  UNF_RLS_PAYLOAD_LEN,
+	  UNF_RLS_ACC_PAYLOAD_LEN
+	},
+	{ ELS_REC,
+	  UNF_REC_PAYLOAD_LEN,
+	  UNF_REC_ACC_PAYLOAD_LEN
+	}
+};
+
+static unsigned int hifc_get_els_req_and_acc_pld_len(unsigned short cmnd,
+						     unsigned int *req_pld_len,
+						     unsigned int *rsp_pld_len)
+{
+	unsigned int ret = RETURN_OK;
+	unsigned int i;
+
+	UNF_CHECK_VALID(0x4917, UNF_TRUE, req_pld_len, return UNF_RETURN_ERROR);
+
+	for (i = 0; i < (sizeof(els_pld_table_map) /
+	     sizeof(struct hifc_els_cmd_payload_table_s)); i++) {
+		if (els_pld_table_map[i].cmnd == cmnd) {
+			*req_pld_len = els_pld_table_map[i].req_pld_len;
+			*rsp_pld_len = els_pld_table_map[i].rsp_pld_len;
+			return ret;
+		}
+	}
+
+	switch (cmnd) {
+	case ELS_PRLI:
+		/* If sirt is enabled, the PRLI ACC payload extends
+		 * 12 bytes
+		 */
+		*req_pld_len = HIFC_GET_PRLI_PAYLOAD_LEN;
+		*rsp_pld_len = HIFC_GET_PRLI_PAYLOAD_LEN;
+		break;
+
+	default:
+		HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT,
+			   UNF_ERR, "[err]Unknown ELS_CMD(0x%x)", cmnd);
+		ret = UNF_RETURN_ERROR;
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * Function Name       : hifc_get_els_frame_len
+ * Function Description: Get ELS frame length
+ * Input Parameters    : type,
+ *                     : cmnd
+ * Output Parameters   : v_frame_len
+ * Return Type         : unsigned int
+ */
+static unsigned int hifc_get_els_frame_len(unsigned short type,
+					   unsigned short cmnd,
+					   unsigned int *v_frame_len)
+{
+	unsigned int ret = RETURN_OK;
+	unsigned int hdr_len = sizeof(struct unf_fchead_s);
+	unsigned int req_len = 0;
+	unsigned int rsp_len = 0;
+
+	UNF_CHECK_VALID(0x4917, UNF_TRUE, v_frame_len, return UNF_RETURN_ERROR);
+
+	if (type == ELS_RJT)
+		rsp_len = UNF_ELS_ACC_RJT_LEN;
+	else
+		ret = hifc_get_els_req_and_acc_pld_len(cmnd, &req_len,
+						       &rsp_len);
+
+	if (ret == RETURN_OK)
+		*v_frame_len = hdr_len + ((type == ELS_ACC || type == ELS_RJT) ?
+					  rsp_len : req_len);
+
+	return ret;
+}
+
+static void hifc_build_els_frame_header(unsigned short v_xid_base,
+					unsigned short v_cmnd_type,
+					unsigned short els_code,
+					struct unf_frame_pkg_s *v_pkg)
+{
+	unsigned int fctl = 0;
+	unsigned int rctl = 0;
+	unsigned int type = 0;
+	struct unf_fchead_s *cm_fc_hdr_buf = NULL;
+	struct unf_fchead_s *pkg_fc_hdr_info = NULL;
+
+	pkg_fc_hdr_info = &v_pkg->frame_head;
+	cm_fc_hdr_buf = HIFC_GET_CMND_FC_HEADER(v_pkg);
+
+	if (v_cmnd_type == ELS_CMND) {
+		rctl = HIFC_FC_RCTL_ELS_REQ;
+		fctl = HIFC_FCTL_REQ;
+
+		/* When an ELS_CMD frame is sent, adjust the oxid */
+		cm_fc_hdr_buf->oxid_rxid = pkg_fc_hdr_info->oxid_rxid +
+					   ((unsigned int)v_xid_base << 16);
+	} else {
+		rctl = HIFC_FC_RCTL_ELS_RSP;
+		fctl = HIFC_FCTL_RESP;
+
+		/* When an ELS_RSP frame is sent, adjust the rxid */
+		cm_fc_hdr_buf->oxid_rxid = pkg_fc_hdr_info->oxid_rxid +
+					   v_xid_base;
+	}
+
+	type = HIFC_FC_TYPE_ELS;
+
+	/* Get SID, DID, OXID, RXID from CM layer */
+	cm_fc_hdr_buf->rctl_did = pkg_fc_hdr_info->rctl_did;
+	cm_fc_hdr_buf->csctl_sid = pkg_fc_hdr_info->csctl_sid;
+	cm_fc_hdr_buf->parameter = 0;
+
+	/* R_CTL, CS_CTL, TYPE, F_CTL, SEQ_ID, DF_CTL, SEQ_CNT, LL filled */
+	UNF_SET_FC_HEADER_RCTL(cm_fc_hdr_buf, rctl);
+	UNF_SET_FC_HEADER_CS_CTL(cm_fc_hdr_buf, 0);
+	UNF_SET_FC_HEADER_TYPE(cm_fc_hdr_buf, type);
+	UNF_SET_FC_HEADER_FCTL(cm_fc_hdr_buf, fctl);
+	UNF_SET_FC_HEADER_SEQ_CNT(cm_fc_hdr_buf, 0);
+	UNF_SET_FC_HEADER_DF_CTL(cm_fc_hdr_buf, 0);
+	UNF_SET_FC_HEADER_SEQ_ID(cm_fc_hdr_buf, 0);
+
+	UNF_PRINT_SFS(UNF_INFO, 0, cm_fc_hdr_buf, sizeof(struct unf_fchead_s));
+}
+
+void hifc_save_login_para_in_sq_info(
+			struct hifc_hba_s *v_hba,
+			struct unf_port_login_parms_s *v_login_co_parms)
+{
+	struct hifc_hba_s *hba = NULL;
+	unsigned int rport_index = v_login_co_parms->rport_index;
+	struct hifc_parent_sq_info_s *sq_info = NULL;
+
+	hba = (struct hifc_hba_s *)v_hba;
+
+	if (rport_index >= UNF_HIFC_MAXRPORT_NUM) {
+		HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
+			   "[err]Port(0x%x) save login parms, but uplevel alloc invalid rport index: 0x%x",
+			   hba->port_cfg.port_id, rport_index);
+
+		return;
+	}
+
+	sq_info =
+	&hba->parent_queue_mgr->parent_queues[rport_index].parent_sq_info;
+
+	sq_info->plogi_coparams.seq_cnt = v_login_co_parms->seq_cnt;
+	sq_info->plogi_coparams.ed_tov = v_login_co_parms->ed_tov;
+	sq_info->plogi_coparams.tx_mfs = (v_login_co_parms->tx_mfs <
+	HIFC_DEFAULT_TX_MAX_FREAM_SIZE) ? HIFC_DEFAULT_TX_MAX_FREAM_SIZE :
+	v_login_co_parms->tx_mfs;
+
+	sq_info->plogi_coparams.ed_tov_timer_val =
+	v_login_co_parms->ed_tov_timer_val;
+}
+
+static void hifc_save_default_plogi_param_in_ctx(
+				struct hifc_hba_s *v_hba,
+				struct hifcoe_parent_context_s *v_ctx,
+				struct unf_frame_pkg_s *v_pkg)
+{
+	unsigned int tx_mfs = HIFC_DEFAULT_TX_MAX_FREAM_SIZE;
+	unsigned int did = 0;
+
+	did = UNF_GET_DID(v_pkg);
+
+	if (did == UNF_FC_FID_DIR_SERV)
+		tx_mfs = 2048;
+
+	v_ctx->sw_section.tx_mfs = cpu_to_be16((unsigned short)(tx_mfs));
+}
+
+static void hifc_save_plogi_acc_param_in_ctx(
+				struct hifc_hba_s *v_hba,
+				struct hifcoe_parent_context_s *v_ctx,
+				struct unf_frame_pkg_s *v_pkg)
+{
+#define HIFC_UCODE_MAX_PKT_SIZE_PER_DISPATCH ((8 * 1024))
+
+	struct unf_lgn_port_coparms_s *port_co_param = NULL;
+	struct unf_plogi_payload_s *plogi_acc_pld = NULL;
+
+	plogi_acc_pld = UNF_GET_PLOGI_ACC_PAYLOAD(v_pkg);
+	port_co_param = &plogi_acc_pld->parms.co_parms;
+
+	/* e_d_tov and seq_cnt */
+	hifc_big_to_cpu32(&v_ctx->sw_section.sw_ctxt_config.pctxt_val1,
+			  sizeof(unsigned int));
+
+	v_ctx->sw_section.sw_ctxt_config.dw.e_d_tov =
+	port_co_param->e_d_tov_resolution;
+
+	v_ctx->sw_section.sw_ctxt_config.dw.seq_cnt =
+	port_co_param->seq_cnt;
+
+	hifc_cpu_to_big32(&v_ctx->sw_section.sw_ctxt_config.pctxt_val1,
+			  sizeof(unsigned int));
+
+	v_ctx->sw_section.tx_mfs =
+	(unsigned short)(v_pkg->private[PKG_PRIVATE_RPORT_RX_SIZE]) <
+	HIFC_DEFAULT_TX_MAX_FREAM_SIZE ?
+	cpu_to_be16((unsigned short)HIFC_DEFAULT_TX_MAX_FREAM_SIZE) :
+	cpu_to_be16((unsigned short)
+		    (v_pkg->private[PKG_PRIVATE_RPORT_RX_SIZE]));
+
+	v_ctx->sw_section.e_d_tov_timer_val =
+	cpu_to_be32(port_co_param->e_d_tov);
+
+	v_ctx->sw_section.mfs_unaligned_bytes =
+	cpu_to_be16(HIFC_UCODE_MAX_PKT_SIZE_PER_DISPATCH %
+		    port_co_param->bb_receive_data_field_size);
+}
+
+static void hifc_recover_offloading_state(
+			struct hifc_parent_queue_info_s *v_prntq_info,
+			enum hifc_parent_queue_state_e offload_state)
+{
+	unsigned long flag = 0;
+
+	spin_lock_irqsave(&v_prntq_info->parent_queue_state_lock, flag);
+
+	if (v_prntq_info->offload_state == HIFC_QUEUE_STATE_OFFLOADING)
+		v_prntq_info->offload_state = offload_state;
+
+	spin_unlock_irqrestore(&v_prntq_info->parent_queue_state_lock, flag);
+}
+
+static void hifc_save_magic_num_in_ctx(struct hifcoe_parent_context_s *v_ctx,
+				       struct unf_frame_pkg_s *v_pkg)
+{
+	/* The CID itself is initialized by the microcode. The driver reuses
+	 * the CID field to carry the magic number; the microcode later
+	 * overwrites it with the real CID.
+	 */
+	v_ctx->sw_section.cid = cpu_to_be32(UNF_GETXCHGALLOCTIME(v_pkg));
+}
+
+static void hifc_save_magic_num_in_nurmal_root_ts(
+			struct hifc_root_sqe_s *v_rt_sqe,
+			struct unf_frame_pkg_s *v_pkg)
+{
+	v_rt_sqe->task_section.fc_dw1.magic_num = UNF_GETXCHGALLOCTIME(v_pkg);
+}
+
+static int hifc_check_need_delay_offload(
+		void *v_hba,
+		struct unf_frame_pkg_s *v_pkg,
+		unsigned int rport_idx,
+		struct hifc_parent_queue_info_s *v_cur_parent_queue,
+		struct hifc_parent_queue_info_s **v_offload_parnt_queue)
+{
+	unsigned long flag = 0;
+	struct hifc_parent_queue_info_s *offload_parnt_queue = NULL;
+
+	spin_lock_irqsave(&v_cur_parent_queue->parent_queue_state_lock, flag);
+
+	if (v_cur_parent_queue->offload_state == HIFC_QUEUE_STATE_OFFLOADING) {
+		spin_unlock_irqrestore(
+			&v_cur_parent_queue->parent_queue_state_lock, flag);
+
+		offload_parnt_queue = hifc_find_offload_parent_queue(
+			v_hba,
+			v_pkg->frame_head.csctl_sid & UNF_NPORTID_MASK,
+			v_pkg->frame_head.rctl_did & UNF_NPORTID_MASK,
+			rport_idx);
+		if (offload_parnt_queue) {
+			*v_offload_parnt_queue = offload_parnt_queue;
+
+			return UNF_TRUE;
+		}
+	} else {
+		spin_unlock_irqrestore(
+			&v_cur_parent_queue->parent_queue_state_lock, flag);
+	}
+
+	return UNF_FALSE;
+}
+
+static unsigned int hifc_build_service_wqe_root_offload(
+			void *v_hba,
+			struct unf_frame_pkg_s *v_pkg,
+			struct hifc_parent_queue_info_s *v_parnt_qinfo,
+			struct hifc_root_sqe_s *v_sqe)
+{
+	unsigned int cqm_xid = 0;
+	unsigned short els_cmnd_type = UNF_ZERO;
+	struct hifc_parent_ctx_s *parnt_ctx = NULL;
+	struct hifc_parent_sq_info_s *sq_info = NULL;
+	struct hifcoe_parent_context_s *v_ctx = NULL;
+
+	els_cmnd_type = HIFC_GET_ELS_RSP_TYPE(v_pkg->cmnd);
+	cqm_xid = hifc_get_parent_ctx_xid_by_pkg(v_hba, v_pkg);
+
+	/* An offload request is initiated only when the parent queue is in
+	 * the initialized state
+	 */
+	if (v_parnt_qinfo->offload_state == HIFC_QUEUE_STATE_INITIALIZED) {
+		/* Obtain the parent context and set WQE to off_load, GPA_Addr */
+		parnt_ctx = hifc_get_parnt_ctx_virt_addr_by_pkg(v_hba, v_pkg);
+
+		sq_info = hifc_find_parent_sq_by_pkg(v_hba, v_pkg);
+		if (unlikely((!parnt_ctx) || (!sq_info) ||
+			     (cqm_xid == INVALID_VALUE32))) {
+			return UNF_RETURN_ERROR;
+		}
+
+		/* Fill in ROOT SQE with offload request */
+		hifc_build_els_wqe_root_offload(
+			v_sqe,
+			parnt_ctx->cqm_parent_ctx_obj->paddr,
+			cqm_xid);
+
+		/* For a PLOGI ACC, parse the negotiated login parameters and
+		 * fill them into the context; otherwise use the default
+		 * PLOGI parameters
+		 */
+		v_ctx = (struct hifcoe_parent_context_s *)
+			parnt_ctx->virt_parent_ctx;
+
+		if (els_cmnd_type == ELS_ACC)
+			hifc_save_plogi_acc_param_in_ctx(
+				(struct hifc_hba_s *)v_hba, v_ctx, v_pkg);
+		else
+			hifc_save_default_plogi_param_in_ctx(
+				(struct hifc_hba_s *)v_hba, v_ctx, v_pkg);
+
+		/* Update the SID/DID into the parent SQ info */
+		sq_info->local_port_id = UNF_GET_SID(v_pkg);
+		sq_info->remote_port_id = UNF_GET_DID(v_pkg);
+
+		/* Transfer the key value to the ucode for offload */
+		hifc_big_to_cpu32(v_ctx->key, sizeof(v_ctx->key));
+		memcpy(v_ctx->key, &sq_info->local_port_id,
+		       sizeof(sq_info->local_port_id));
+		memcpy((unsigned char *)v_ctx->key +
+		       sizeof(sq_info->local_port_id),
+		       &sq_info->remote_port_id,
+		       sizeof(sq_info->remote_port_id));
+
+		hifc_cpu_to_big32(v_ctx->key, sizeof(v_ctx->key));
+
+		/* Update magic num to parent_ctx */
+		hifc_save_magic_num_in_ctx(v_ctx, v_pkg);
+
+		hifc_build_service_wqe_ctx_sge(
+			v_sqe, parnt_ctx->parent_ctx,
+			sizeof(struct hifcoe_parent_context_s));
+
+		v_parnt_qinfo->offload_state = HIFC_QUEUE_STATE_OFFLOADING;
+	} else {
+		/* If the connection is being uninstalled, the PLOGI is still
+		 * delivered through the root channel and must carry the
+		 * parent xid to the ucode.
+		 */
+		v_sqe->task_section.fc_dw4.parent_xid = cqm_xid;
+
+		HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_IO_ATT, UNF_WARN,
+			   "[warn]Port(0x%x) send PLOGI with no offload while parent queue is not in initialized status",
+			   ((struct hifc_hba_s *)v_hba)->port_cfg.port_id);
+	}
+
+	return RETURN_OK;
+}
+
+static unsigned int hifc_send_els_via_root(void *v_hba,
+					   struct unf_frame_pkg_s *v_pkg)
+{
+	unsigned int ret = UNF_RETURN_ERROR;
+	unsigned short els_cmd_code = UNF_ZERO;
+	unsigned short els_cmnd_type = UNF_ZERO;
+	unsigned int frame_len = 0;
+	unsigned int exch_id = 0;
+	unsigned int scq_num = 0;
+	unsigned int rport_idx = 0;
+	int sqe_delay = UNF_FALSE;
+	void *frame_addr = NULL;
+	struct hifc_hba_s *hba = NULL;
+	struct hifc_parent_queue_info_s *prnt_qinfo = NULL;
+	struct hifc_parent_queue_info_s *offload_parnt_queue = NULL;
+	struct hifc_root_sqe_s *sqe = NULL;
+	struct hifc_root_sqe_s local_rt_sqe;
+	unsigned long flag = 0;
+	enum hifc_parent_queue_state_e last_offload_state =
+					HIFC_QUEUE_STATE_INITIALIZED;
+	struct hifc_destroy_ctrl_info_s destroy_sqe_info = { 0 };
+	unsigned long long frame_phy_addr;
+
+	/* The ROOT SQE is assembled in local variables and then copied to
+	 * the queue memory
+	 */
+	sqe = &local_rt_sqe;
+	hba = (struct hifc_hba_s *)v_hba;
+
+	memset(sqe, 0, sizeof(local_rt_sqe));
+
+	/* Determine the ELS type in v_pkg */
+	els_cmnd_type = HIFC_GET_ELS_RSP_TYPE(v_pkg->cmnd);
+	if (HIFC_PKG_IS_ELS_RSP(els_cmnd_type)) {
+		els_cmd_code = HIFC_GET_ELS_RSP_CODE(v_pkg->cmnd);
+		exch_id = UNF_GET_RXID(v_pkg);
+		sqe->task_section.fc_dw0.task_type = HIFC_SQE_ELS_RSP;
+	} else {
+		els_cmd_code = els_cmnd_type;
+		els_cmnd_type = ELS_CMND;
+		exch_id = UNF_GET_OXID(v_pkg);
+		sqe->task_section.fc_dw0.task_type = HIFC_SQE_ELS_CMND;
+	}
+	if ((els_cmd_code == ELS_ECHO) && (els_cmnd_type != ELS_RJT)) {
+		HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_WARN,
+			   "[info]Port(0x%x) RPort(0x%x) send ELS ECHO can't send via root Type(0x%x)",
+			   hba->port_cfg.port_id, rport_idx, els_cmnd_type);
+
+		return UNF_RETURN_NOT_SUPPORT;
+	}
+	exch_id += hba->exit_base;
+
+	ret = hifc_get_els_frame_len(els_cmnd_type, els_cmd_code, &frame_len);
+	if (ret != RETURN_OK) {
+		dump_stack();
+		return ret;
+	}
+
+	/* Obtain the frame start address */
+	frame_addr = HIFC_GET_CMND_HEADER_ADDR(v_pkg);
+	frame_phy_addr = v_pkg->unf_cmnd_pload_bl.buf_dma_addr;
+
+	/* Assemble the frame header and adjust the Payload based on the ELS */
+	hifc_build_els_frame_header(hba->exit_base, els_cmnd_type,
+				    els_cmd_code, v_pkg);
+
+	/* Assemble the Control Section */
+	hifc_build_service_wqe_ctrl_section(
+		&sqe->ctrl_section,
+		HIFC_BYTES_TO_QW_NUM(
+			sizeof(struct hifc_root_sqe_task_section_s)),
+		HIFC_BYTES_TO_QW_NUM(sizeof(struct hifc_root_sge_s)));
+
+	/* Fill in Normal Root SQE TS */
+	rport_idx = v_pkg->private[PKG_PRIVATE_XCHG_RPORT_INDEX];
+	scq_num = hifc_get_rport_maped_cmd_scqn(v_hba, rport_idx);
+	hifc_build_service_wqe_root_ts(v_hba, sqe, exch_id, rport_idx, scq_num);
+
+	/* Update magic number into sqe */
+	hifc_save_magic_num_in_nurmal_root_ts(sqe, v_pkg);
+
+	/* Fill in the special part of Normal Root SQE TS and initiate
+	 * implicit uninstallation
+	 */
+	if ((els_cmd_code == ELS_PLOGI) && (els_cmnd_type != ELS_RJT)) {
+		prnt_qinfo = hifc_find_parent_queue_info_by_pkg(hba, v_pkg);
+		if (!prnt_qinfo) {
+			HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT,
+				   UNF_ERR,
+				   "[warn]Port(0x%x) RPort(0x%x) send ELS Type(0x%x) find parent queue fail",
+				   hba->port_cfg.port_id, rport_idx,
+				   els_cmnd_type);
+			return UNF_RETURN_ERROR;
+		}
+
+		spin_lock_irqsave(&prnt_qinfo->parent_queue_state_lock, flag);
+
+		last_offload_state = prnt_qinfo->offload_state;
+
+		/* Fill in the special part of Normal Root SQE TS */
+		ret = hifc_build_service_wqe_root_offload((void *)hba,
+							  v_pkg, prnt_qinfo,
+							  sqe);
+		if (ret != RETURN_OK) {
+			spin_unlock_irqrestore(
+				&prnt_qinfo->parent_queue_state_lock, flag);
+
+			return ret;
+		}
+
+		spin_unlock_irqrestore(&prnt_qinfo->parent_queue_state_lock,
+				       flag);
+
+		/* Before the offload, check whether there is a risk of
+		 * repeated offload
+		 */
+		sqe_delay = hifc_check_need_delay_offload((void *)hba,
+							   v_pkg, rport_idx,
+							   prnt_qinfo,
+							   &offload_parnt_queue);
+	}
+
+	/* Fill in Normal Root SQE SGE */
+	hifc_build_service_wqe_root_sge(sqe, frame_addr, frame_phy_addr,
+					frame_len, v_hba);
+
+	if (sqe_delay == UNF_TRUE) {
+		ret = hifc_push_delay_sqe((void *)hba, offload_parnt_queue,
+					  sqe, v_pkg);
+		if (ret == RETURN_OK) {
+			hifc_recover_offloading_state(prnt_qinfo,
+						      last_offload_state);
+
+			return ret;
+		}
+	}
+
+	HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO,
+		   "[info]Port(0x%x) RPort(0x%x) send ELS Type(0x%x) Code(0x%x) ExchId(0x%x)",
+		   hba->port_cfg.port_id, rport_idx, els_cmnd_type,
+		   els_cmd_code, exch_id);
+
+	ret = hifc_root_sq_enqueue(hba, sqe);
+	if ((ret != RETURN_OK) && (prnt_qinfo)) {
+		hifc_recover_offloading_state(prnt_qinfo, last_offload_state);
+
+		spin_lock_irqsave(&prnt_qinfo->parent_queue_state_lock, flag);
+
+		if (prnt_qinfo->parent_sq_info.destroy_sqe.valid == UNF_TRUE) {
+			memcpy(&destroy_sqe_info,
+			       &prnt_qinfo->parent_sq_info.destroy_sqe,
+			       sizeof(struct hifc_destroy_ctrl_info_s));
+
+			prnt_qinfo->parent_sq_info.destroy_sqe.valid =
+								UNF_FALSE;
+		}
+
+		spin_unlock_irqrestore(&prnt_qinfo->parent_queue_state_lock,
+				       flag);
+
+		hifc_pop_destroy_parent_queue_sqe((void *)v_hba,
+						  &destroy_sqe_info);
+
+		HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
+			   "[warn]Port(0x%x) RPort(0x%x) send ELS Type(0x%x) Code(0x%x) ExchId(0x%x) fail, recover offloadstatus(%u)",
+			   hba->port_cfg.port_id,
+			   rport_idx,
+			   els_cmnd_type,
+			   els_cmd_code,
+			   exch_id,
+			   prnt_qinfo->offload_state);
+	}
+
+	return ret;
+}
+
+static void *hifc_get_els_frame_addr(struct hifc_hba_s *v_hba,
+				     struct unf_frame_pkg_s *v_pkg,
+				     unsigned short els_cmd_code,
+				     unsigned short els_cmnd_type,
+				     unsigned long long *v_phyaddr)
+{
+	void *frame_pld_addr;
+	dma_addr_t els_frame_addr = 0;
+
+	if (els_cmd_code == ELS_ECHO) {
+		frame_pld_addr = (void *)UNF_GET_ECHO_PAYLOAD(v_pkg);
+		els_frame_addr = UNF_GET_ECHO_PAYLOAD_PHYADDR(v_pkg);
+	} else if (els_cmd_code == ELS_RSCN) {
+		if (els_cmnd_type == ELS_CMND) {
+			/* Not supported */
+			frame_pld_addr = NULL;
+			els_frame_addr = 0;
+		} else {
+			frame_pld_addr =
+			(void *)UNF_GET_RSCN_ACC_PAYLOAD(v_pkg);
+			els_frame_addr = v_pkg->unf_cmnd_pload_bl.buf_dma_addr +
+					 sizeof(struct unf_fchead_s);
+		}
+	} else {
+		frame_pld_addr = (void *)HIFC_GET_CMND_PAYLOAD_ADDR(v_pkg);
+		els_frame_addr = v_pkg->unf_cmnd_pload_bl.buf_dma_addr +
+				 sizeof(struct unf_fchead_s);
+	}
+	*v_phyaddr = els_frame_addr;
+	return frame_pld_addr;
+}
+
+static unsigned int hifc_send_els_via_parent(
+			void *v_hba,
+			struct unf_frame_pkg_s *v_pkg,
+			struct hifc_parent_queue_info_s *v_prntq_info)
+{
+	unsigned int ret = UNF_RETURN_ERROR;
+	unsigned short els_cmd_code = UNF_ZERO;
+	unsigned short els_cmnd_type = UNF_ZERO;
+	unsigned short remote_xid = 0;
+	unsigned short local_xid = 0;
+	struct hifc_hba_s *hba;
+	struct hifc_parent_sq_info_s *sq_info = NULL;
+	struct hifcoe_sqe_s sqe;
+	void *frame_pld_addr;
+	unsigned int frame_pld_len = 0;
+	unsigned int acc_pld_len = 0;
+	unsigned long long fram_phy_addr = 0;
+
+	hba = (struct hifc_hba_s *)v_hba;
+
+	memset(&sqe, 0, sizeof(struct hifcoe_sqe_s));
+
+	sq_info = &v_prntq_info->parent_sq_info;
+
+	/* Determine the ELS type in v_pkg */
+	els_cmnd_type = HIFC_GET_ELS_CMND_CODE(v_pkg->cmnd);
+	if (HIFC_PKG_IS_ELS_RSP(els_cmnd_type)) {
+		els_cmd_code = HIFC_GET_ELS_RSP_CODE(v_pkg->cmnd);
+		remote_xid = UNF_GET_OXID(v_pkg);
+		local_xid = UNF_GET_RXID(v_pkg) + hba->exit_base;
+	} else {
+		els_cmd_code = els_cmnd_type;
+		els_cmnd_type = ELS_CMND;
+		local_xid = UNF_GET_OXID(v_pkg) + hba->exit_base;
+		remote_xid = UNF_GET_RXID(v_pkg);
+	}
+
+	frame_pld_addr = hifc_get_els_frame_addr(v_hba, v_pkg, els_cmd_code,
+						 els_cmnd_type, &fram_phy_addr);
+
+	if (HIFC_PKG_IS_ELS_RSP(els_cmnd_type)) {
+		ret = hifc_get_els_rps_pld_len(els_cmnd_type, els_cmd_code,
+					       &frame_pld_len);
+		if (ret != RETURN_OK)
+			return ret;
+
+		hifc_build_els_wqe_ts_rsp(
+			&sqe, sq_info, frame_pld_addr,
+			els_cmnd_type, els_cmd_code,
+			v_prntq_info->parent_sts_scq_info.cqm_queue_id);
+	} else {
+		/* Fill in HIFCOE_TASK_T_ELS */
+		ret = hifc_get_els_req_and_acc_pld_len(els_cmd_code,
+						       &frame_pld_len,
+						       &acc_pld_len);
+		if (ret != RETURN_OK)
+			return ret;
+
+		hifc_build_els_wqe_ts_req(
+			&sqe, sq_info, els_cmd_code,
+			v_prntq_info->parent_sts_scq_info.cqm_queue_id,
+			frame_pld_addr);
+	}
+
+	/* Assemble the magic num field of the ELS */
+	hifc_build_els_wqe_ts_magic_num(&sqe, els_cmnd_type,
+					UNF_GETXCHGALLOCTIME(v_pkg));
+
+	/* Assemble the SQE Control Section part */
+	hifc_build_service_wqe_ctrl_section(
+		&sqe.ctrl_sl,
+		HIFC_BYTES_TO_QW_NUM(HIFC_SQE_TS_SIZE),
+		HIFC_BYTES_TO_QW_NUM(sizeof(struct hifcoe_variable_sge_s)));
+
+	/* Assemble the SQE Task Section ELS Common part */
+	hifc_build_service_wqe_ts_common(&sqe.ts_sl, sq_info->rport_index,
+					 local_xid, remote_xid,
+					 HIFC_LSW(frame_pld_len));
+
+	/* Build SGE */
+	hifc_build_els_gs_wqe_sge(&sqe, frame_pld_addr, fram_phy_addr,
+				  frame_pld_len, sq_info->context_id, v_hba);
+
+	HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
+		   "[info]Port(0x%x) RPort(0x%x) send ELS Type(0x%x) Code(0x%x) ExchId(0x%x)",
+		   hba->port_cfg.port_id, sq_info->rport_index, els_cmnd_type,
+		   els_cmd_code, local_xid);
+
+	ret = hifc_parent_sq_enqueue(sq_info, &sqe);
+
+	return ret;
+}
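+
+/* ELS send entry: before the session is offloaded the frame goes out
+ * through the root SQ (hifc_send_els_via_root); once the parent queue is
+ * offloaded it goes through the parent SQ (hifc_send_els_via_parent).
+ */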
+
+unsigned int hifc_send_els_cmnd(void *v_hba, struct unf_frame_pkg_s *v_pkg)
+{
+	unsigned int ret = UNF_RETURN_ERROR;
+	unsigned long flag = 0;
+	struct hifc_hba_s *hba = NULL;
+	struct hifc_parent_queue_info_s *prnt_qinfo = NULL;
+	unsigned short els_cmd_code = UNF_ZERO;
+	unsigned short els_rsp_code = UNF_ZERO;
+	union unf_sfs_u *fc_entry = NULL;
+	struct unf_rrq_s *rrq_pld = NULL;
+	unsigned short ox_id = 0;
+	unsigned short rx_id = 0;
+
+	/* Check Parameters */
+	UNF_CHECK_VALID(0x5014, UNF_TRUE, v_hba, return UNF_RETURN_ERROR);
+	UNF_CHECK_VALID(0x5015, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR);
+	UNF_CHECK_VALID(0x5016, UNF_TRUE, UNF_GET_SFS_ENTRY(v_pkg),
+			return UNF_RETURN_ERROR);
+	UNF_CHECK_VALID(0x5017, UNF_TRUE, HIFC_GET_CMND_PAYLOAD_ADDR(v_pkg),
+			return UNF_RETURN_ERROR);
+
+	HIFC_CHECK_PKG_ALLOCTIME(v_pkg);
+
+	hba = (struct hifc_hba_s *)v_hba;
+	els_cmd_code = HIFC_GET_ELS_CMND_CODE(v_pkg->cmnd);
+	els_rsp_code = HIFC_GET_ELS_RSP_CODE(v_pkg->cmnd);
+
+	/* Special processing for an RRQ request */
+	if (els_cmd_code == ELS_RRQ) {
+		fc_entry = UNF_GET_SFS_ENTRY(v_pkg);
+		rrq_pld = &fc_entry->rrq;
+		ox_id = (unsigned short)(rrq_pld->oxid_rxid >> 16);
+		rx_id = (unsigned short)(rrq_pld->oxid_rxid & 0xFFFF);
+		ox_id += hba->exit_base;
+		rrq_pld->oxid_rxid = ox_id << 16 | rx_id;
+	}
+
+	prnt_qinfo = hifc_find_parent_queue_info_by_pkg(hba, v_pkg);
+	if (!prnt_qinfo) {
+		HIFC_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
+			   "Port(0x%x) send ELS SID(0x%x) DID(0x%x) get a null parent queue info, send via root",
+			   hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid,
+			   v_pkg->frame_head.rctl_did);
+
+		/* If the Rport cannot be found, send the Pkg by the Root SQ */
+		ret = hifc_send_els_via_root(v_hba, v_pkg);
+		return ret;
+	}
+
+	spin_lock_irqsave(&prnt_qinfo->parent_queue_state_lock, flag);
+
+	/* After offload, send the Pkg by the Parent SQ */
+	if (HIFC_RPORT_OFFLOADED(prnt_qinfo)) {
+		spin_unlock_irqrestore(&prnt_qinfo->parent_queue_state_lock,
+				       flag);
+
+		ret = hifc_send_els_via_parent(v_hba, v_pkg, prnt_qinfo);
+	} else {
+		/* Before offload, send the Pkg by the Root SQ */
+		spin_unlock_irqrestore(&prnt_qinfo->parent_queue_state_lock,
+				       flag);
+
+		ret = hifc_send_els_via_root(v_hba, v_pkg);
+	}
+
+	return ret;
+}
+
+unsigned int hifc_rq_rcv_els_rsp_sts(
+		struct hifc_hba_s *v_hba,
+		struct hifc_root_rq_complet_info_s *v_cs_info)
+{
+	unsigned int ret = UNF_RETURN_ERROR;
+	unsigned int rx_id = (~0);
+	struct unf_frame_pkg_s pkg = { 0 };
+
+	rx_id = (unsigned int)v_cs_info->exch_id - v_hba->exit_base;
+	pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME] = v_cs_info->magic_num;
+
+	ret = hifc_rcv_els_rsp_sts(v_hba, &pkg, rx_id);
+	HIFC_IO_STAT(v_hba, HIFCOE_TASK_T_ELS_RSP_STS);
+
+	return ret;
+}
+
+static unsigned int hifc_recv_els_rsp_payload(struct hifc_hba_s *v_hba,
+					      struct unf_frame_pkg_s *v_pkg,
+					      unsigned int ox_id,
+					      unsigned char *v_els_pld_buf,
+					      unsigned int pld_len)
+{
+	unsigned int ret = UNF_RETURN_ERROR;
+
+	v_pkg->type = UNF_PKG_ELS_REQ_DONE;
+	v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = ox_id;
+
+	/* Payload Buffer in ROOT SQ Buffer */
+	v_pkg->unf_cmnd_pload_bl.buffer_ptr = v_els_pld_buf;
+	v_pkg->unf_cmnd_pload_bl.length = pld_len;
+	v_pkg->byte_orders |= HIFC_BIT_2;
+
+	/* Mark as a non-last block */
+	v_pkg->last_pkg_flag = UNF_PKG_NOT_LAST_RESPONSE;
+
+	UNF_LOWLEVEL_RECEIVE_ELS_PKG(ret, v_hba->lport, v_pkg);
+
+	return ret;
+}
+
+static unsigned int hifc_rq_rcv_els_frame(struct hifc_hba_s *v_hba,
+					  unsigned char *v_frame,
+					  unsigned int frame_len,
+					  unsigned short pkg_flag,
+					  struct unf_frame_pkg_s *v_pkg)
+{
+	unsigned int ret = UNF_RETURN_ERROR;
+	unsigned int ox_id = INVALID_VALUE32;
+	unsigned int pld_len = 0;
+	unsigned char *plg_buf = NULL;
+	unsigned long flags = 0;
+
+	plg_buf = v_frame;
+	pld_len = frame_len;
+
+	v_pkg->status = UNF_IO_SUCCESS;
+
+	if (UNF_GET_FC_HEADER_RCTL(&v_pkg->frame_head) ==
+	    HIFC_FC_RCTL_ELS_RSP) {
+		ox_id = v_pkg->frame_head.oxid_rxid >> 16;
+
+		if (!(HIFC_XID_IS_VALID(ox_id, (unsigned int)v_hba->exit_base,
+					(unsigned int)v_hba->exit_count))) {
+			HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT,
+				   UNF_WARN, "[err]Port(0x%x) ExchId(0x%x) isn't in 0x%x~0x%x",
+				   v_hba->port_cfg.port_id, ox_id,
+				   v_hba->exit_base,
+				   v_hba->exit_base + v_hba->exit_count - 1);
+
+			goto rq_recv_error_els_frame;
+		}
+
+		ox_id -= v_hba->exit_base;
+
+		ret = hifc_recv_els_rsp_payload(v_hba, v_pkg, ox_id, plg_buf,
+						pld_len);
+		if (ret != RETURN_OK) {
+			HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT,
+				   UNF_ERR,
+				   "[err]Port(0x%x) receive ELS RSP payload error, OXID(0x%x) RXID(0x%x) PldLen(0x%x)",
+				   v_hba->port_cfg.port_id, UNF_GET_OXID(v_pkg),
+				   UNF_GET_RXID(v_pkg), pld_len);
+
+			HIFC_ERR_IO_STAT(v_hba, HIFCOE_TASK_T_RCV_ELS_RSP);
+		}
+
+		if (HIFC_CHECK_IF_LAST_PKG(pkg_flag)) {
+			ret = hifc_rcv_els_rsp(v_hba, v_pkg, ox_id);
+
+			HIFC_IO_STAT(v_hba, HIFCOE_TASK_T_RCV_ELS_RSP);
+		}
+	} else if (UNF_GET_FC_HEADER_RCTL(&v_pkg->frame_head) ==
+		   HIFC_FC_RCTL_ELS_REQ) {
+		HIFC_IO_STAT(v_hba, HIFCOE_TASK_T_RCV_ELS_CMD);
+
+		if (HIFC_CHECK_IF_FIRST_PKG(pkg_flag))
+			v_pkg->xchg_contex = NULL;
+
+		v_pkg->last_pkg_flag = (HIFC_CHECK_IF_LAST_PKG(pkg_flag)) ?
+				       UNF_PKG_LAST_REQUEST :
+				       UNF_PKG_NOT_LAST_REQUEST;
+
+		ret = hifc_rcv_els_cmnd(v_hba, v_pkg, plg_buf, pld_len,
+					HIFC_CHECK_IF_FIRST_PKG(pkg_flag));
+
+		spin_lock_irqsave(&v_hba->delay_info.srq_lock, flags);
+		if (v_hba->delay_info.srq_delay_flag) {
+			v_hba->delay_info.srq_delay_flag = 0;
+
+			if (!cancel_delayed_work(&v_hba->delay_info.del_work)) {
+				HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN,
+					   UNF_LOG_LOGIN_ATT, UNF_WARN,
+					   "[warn]Port(0x%x) rcvd plogi from srq, process delay timer maybe timeout",
+					   v_hba->port_cfg.port_id);
+			}
+			spin_unlock_irqrestore(&v_hba->delay_info.srq_lock,
+					       flags);
+
+			HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT,
+				   UNF_ERR,
+				   "[info]Port(0x%x) received els from root rq and send delay plogi to CM",
+				   v_hba->port_cfg.port_id);
+
+			hifc_rcv_els_cmnd(
+				v_hba, &v_hba->delay_info.pkg,
+				v_hba->delay_info.pkg.unf_cmnd_pload_bl.buffer_ptr,
+				0, UNF_FALSE);
+		} else {
+			spin_unlock_irqrestore(&v_hba->delay_info.srq_lock,
+					       flags);
+		}
+
+	} else {
+		goto rq_recv_error_els_frame;
+	}
+
+	return ret;
+
+rq_recv_error_els_frame:
+	return HIFC_RQ_ERROR_FRAME;
+}
+
+static unsigned int hifc_rq_rcv_bls_frame(struct hifc_hba_s *v_hba,
+					  struct unf_frame_pkg_s *v_pkg)
+{
+	unsigned int ret = RETURN_OK;
+	unsigned int ox_id = INVALID_VALUE32;
+
+	v_pkg->status = UNF_IO_SUCCESS;
+
+	if ((UNF_GET_FC_HEADER_RCTL(&v_pkg->frame_head) == HIFC_RCTL_BLS_ACC) ||
+	    (UNF_GET_FC_HEADER_RCTL(&v_pkg->frame_head) == HIFC_RCTL_BLS_RJT)) {
+		/* INI Mode */
+		ox_id = UNF_GET_FC_HEADER_OXID(&v_pkg->frame_head);
+		if ((ox_id < (unsigned int)v_hba->exit_base) ||
+		    (ox_id >= (unsigned int)(v_hba->exit_base +
+		    v_hba->exit_count))) {
+			goto rq_recv_error_bls_frame;
+		}
+		ox_id -= v_hba->exit_base;
+
+		ret = hifc_rcv_bls_rsp(v_hba, v_pkg, ox_id);
+		HIFC_IO_STAT(v_hba, HIFCOE_TASK_T_RCV_ABTS_RSP);
+	} else {
+		goto rq_recv_error_bls_frame;
+	}
+
+	return ret;
+
+rq_recv_error_bls_frame:
+	return HIFC_RQ_ERROR_FRAME;
+}
+
+static unsigned int hifc_rq_rcv_service_frame(struct hifc_hba_s *v_hba,
+					      unsigned char *v_frame,
+					      unsigned int frame_len,
+					      unsigned short pkg_flag,
+					      struct unf_frame_pkg_s *v_pkg)
+{
+	unsigned int ret = UNF_RETURN_ERROR;
+	unsigned char fc_frame_type = 0;
+
+	fc_frame_type = UNF_GET_FC_HEADER_TYPE(&v_pkg->frame_head);
+
+	if (fc_frame_type == HIFC_FC_TYPE_ELS) {
+		v_hba->delay_info.root_rq_rcvd_flag = 1;
+		ret = hifc_rq_rcv_els_frame(v_hba, v_frame, frame_len,
+					    pkg_flag, v_pkg);
+	} else if (fc_frame_type == HIFC_FC_TYPE_BLS) {
+		ret = hifc_rq_rcv_bls_frame(v_hba, v_pkg);
+	} else {
+		ret = HIFC_RQ_ERROR_FRAME;
+	}
+
+	if (ret == HIFC_RQ_ERROR_FRAME) {
+		/* Error statistics are collected when an invalid frame
+		 * is received
+		 */
+		HIFC_IO_STAT(v_hba, HIFCOE_TASK_T_BUTT);
+
+		HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
+			   "[info]Port(0x%x) Receive an unsupported frame, Rctl(0x%x), Type(0x%x), Fctl(0x%x), Sid_Did(0x%x_0x%x), OxId_RxId(0x%x_0x%x), FrameLen(0x%x), drop it",
+			   v_hba->port_cfg.port_id,
+			   UNF_GET_FC_HEADER_RCTL(&v_pkg->frame_head),
+			   UNF_GET_FC_HEADER_TYPE(&v_pkg->frame_head),
+			   UNF_GET_FC_HEADER_FCTL(&v_pkg->frame_head),
+			   UNF_GET_FC_HEADER_SID(&v_pkg->frame_head),
+			   UNF_GET_FC_HEADER_DID(&v_pkg->frame_head),
+			   UNF_GET_FC_HEADER_OXID(&v_pkg->frame_head),
+			   UNF_GET_FC_HEADER_RXID(&v_pkg->frame_head),
+			   frame_len);
+	}
+
+	return ret;
+}
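+
+/* Reassembles one frame scattered over several root RQ receive buffers:
+ * the first buffer starts with the FC frame header, the last buffer holds
+ * pkt_len % HIFC_ROOT_RQ_RECV_BUFF_SIZE bytes, and every chunk is pushed
+ * up with the matching first/last package flags.
+ */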
+
+unsigned int hifc_rcv_service_frame_from_rq(
+			struct hifc_hba_s *v_hba,
+			struct hifc_root_rq_info_s *v_rq_info,
+			struct hifc_root_rq_complet_info_s *v_complet_info,
+			unsigned short v_rcv_buf_num)
+{
+	unsigned short remain_len = 0;
+	unsigned short rcv_len = 0;
+	unsigned short pkg_flag = 0;
+	unsigned int ret = UNF_RETURN_ERROR;
+	unsigned short pkt_len = 0;
+	void *root_rq_rcv_buf = NULL;
+	unsigned short ci = 0;
+	unsigned int loop = 0;
+	struct unf_frame_pkg_s pkg = { 0 };
+	struct unf_fchead_s *els_frame = NULL;
+	unsigned char *pld_buf = NULL;
+	unsigned int pld_len = 0;
+
+	ci = v_rq_info->ci;
+	pkt_len = v_complet_info->buf_length;
+	memset(&pkg, 0, sizeof(pkg));
+
+	for (loop = 0; loop < v_rcv_buf_num; loop++) {
+		/* Obtain the rcv buffer */
+		root_rq_rcv_buf =
+		(void *)((unsigned long long)v_rq_info->rq_rcv_buff +
+		HIFC_ROOT_RQ_RECV_BUFF_SIZE * ci);
+
+		/* Calculate the frame data address and length */
+		els_frame = (struct unf_fchead_s *)root_rq_rcv_buf;
+		rcv_len = HIFC_ROOT_RQ_RECV_BUFF_SIZE;
+		pkg_flag = 0;
+
+		if (loop == (v_rcv_buf_num - 1)) {
+			pkg_flag |= HIFC_LAST_PKG_FLAG;
+			remain_len = pkt_len % HIFC_ROOT_RQ_RECV_BUFF_SIZE;
+			rcv_len = (remain_len > 0) ? (remain_len) :
+				  HIFC_ROOT_RQ_RECV_BUFF_SIZE;
+		}
+
+		/* The first buffer carries the FC frame header */
+		if (loop == 0) {
+			pkg_flag |= HIFC_FIRST_PKG_FLAG;
+
+			memcpy(&pkg.frame_head, els_frame,
+			       sizeof(pkg.frame_head));
+			hifc_big_to_cpu32(&pkg.frame_head,
+					  sizeof(pkg.frame_head));
+			pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME] =
+			v_complet_info->magic_num;
+
+			pld_buf = (unsigned char *)(els_frame + 1);
+			pld_len = rcv_len - sizeof(pkg.frame_head);
+		} else {
+			pld_buf = (unsigned char *)els_frame;
+			pld_len = rcv_len;
+		}
+
+		/* Process the rqe sent by the FC ucode */
+		ret = hifc_rq_rcv_service_frame(v_hba, pld_buf, pld_len,
+						pkg_flag, &pkg);
+		if (ret != RETURN_OK) {
+			HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT,
+				   UNF_INFO,
+				   "[err]Up layer process RQE frame or status abnormal(0x%x)",
+				   ret);
+
+			return UNF_RETURN_ERROR;
+		}
+
+		ci = ((ci + 1) < v_rq_info->q_depth) ? (ci + 1) : 0;
+	}
+
+	return RETURN_OK;
+}
+
+static unsigned int hifc_rcv_gs_rsp_payload(const struct hifc_hba_s *v_hba,
+					    struct unf_frame_pkg_s *v_pkg,
+					    unsigned int ox_id,
+					    unsigned char *v_els_pld_buf,
+					    unsigned int pld_len)
+{
+	unsigned int ret = UNF_RETURN_ERROR;
+
+	v_pkg->type = UNF_PKG_GS_REQ_DONE;
+	v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = ox_id;
+
+	/* Convert to little endian */
+	hifc_big_to_cpu32(v_els_pld_buf, pld_len);
+
+	/* Payload Buffer in ROOT SQ Buffer */
+	v_pkg->unf_cmnd_pload_bl.buffer_ptr = v_els_pld_buf;
+	v_pkg->unf_cmnd_pload_bl.length = pld_len;
+
+	/* Mark as a non-last block */
+	v_pkg->last_pkg_flag = UNF_PKG_NOT_LAST_RESPONSE;
+
+	UNF_LOWLEVEL_RECEIVE_GS_PKG(ret, v_hba->lport, v_pkg);
+
+	return ret;
+}
+
+static unsigned int hifc_scq_rcv_abts_rsp(struct hifc_hba_s *v_hba,
+					  union hifcoe_scqe_u *v_scqe)
+{
+	/* Default path, which is sent from SCQ to the driver */
+	unsigned char status = 0;
+	unsigned int ret = UNF_RETURN_ERROR;
+	unsigned int ox_id = INVALID_VALUE32;
+	struct unf_frame_pkg_s pkg = { 0 };
+	struct hifcoe_scqe_rcv_abts_rsp_s *abts_rsp = NULL;
+
+	abts_rsp = &v_scqe->rcv_abts_rsp;
+	pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME] = abts_rsp->magic_num;
+
+	ox_id = (unsigned int)(abts_rsp->wd0.ox_id);
+
+	if (unlikely((ox_id < (unsigned int)v_hba->exit_base) ||
+		     (ox_id >=
+		     (unsigned int)(v_hba->exit_base + v_hba->exit_count)))) {
+		HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
+			   "[err]Port(0x%x) has bad OX_ID(0x%x) for bls_rsp",
+			   v_hba->port_cfg.port_id, ox_id);
+
+		return UNF_RETURN_ERROR;
+	}
+
+	ox_id -= v_hba->exit_base;
+
+	if (unlikely(HIFC_SCQE_HAS_ERRCODE(v_scqe))) {
+		HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
+			   "[warn]Port(0x%x) BLS response has error code(0x%x) tag(0x%x)",
+			   v_hba->port_cfg.port_id,
+			   HIFC_GET_SCQE_STATUS(v_scqe),
+			   (unsigned int)(abts_rsp->wd0.ox_id));
+
+		status = UNF_IO_FAILED;
+	} else {
+		pkg.frame_head.rctl_did = abts_rsp->wd3.did;
+		pkg.frame_head.csctl_sid = abts_rsp->wd4.sid;
+		pkg.frame_head.oxid_rxid = (unsigned int)(abts_rsp->wd0.rx_id)
+					   | ox_id << 16;
+
+		/* BLS_ACC/BLS_RJT: IO_succeed */
+		if (abts_rsp->wd2.fh_rctrl == HIFC_RCTL_BLS_ACC) {
+			status = UNF_IO_SUCCESS;
+		} else if (abts_rsp->wd2.fh_rctrl == HIFC_RCTL_BLS_RJT) {
+			HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO,
+				   UNF_LOG_LOGIN_ATT, UNF_MAJOR,
+				   "[info]Port(0x%x) ABTS RJT: %08x-%08x-%08x",
+				   v_hba->port_cfg.port_id,
+				   abts_rsp->payload[0],
+				   abts_rsp->payload[1], abts_rsp->payload[2]);
+
+			status = UNF_IO_SUCCESS;
+		} else {
+			/* 3. BA_RSP type is err: IO_failed */
+			HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR,
+				   UNF_LOG_LOGIN_ATT, UNF_ERR,
+				   "[err]Port(0x%x) BLS response RCTL is error",
+				   v_hba->port_cfg.port_id);
+
+			HIFC_ERR_IO_STAT(v_hba, HIFC_SCQE_ABTS_RSP);
+
+			status = UNF_IO_FAILED;
+		}
+	}
+
+	/* Set PKG/exchange status & Process BLS_RSP */
+	pkg.status = status;
+	ret = hifc_rcv_bls_rsp(v_hba, &pkg, ox_id);
+
+	HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
+		   "[info]Port(0x%x) recv ABTS rsp OX_ID(0x%x) RX_ID(0x%x) SID(0x%x) DID(0x%x) %s",
+		   v_hba->port_cfg.port_id,
+		   ox_id,
+		   abts_rsp->wd0.rx_id,
+		   abts_rsp->wd4.sid,
+		   abts_rsp->wd3.did,
+		   (ret == RETURN_OK) ? "OK" : "ERROR");
+
+	return ret;
+}
+
+unsigned int hifc_rq_rcv_srv_err(struct hifc_hba_s *v_hba,
+				 struct hifc_root_rq_complet_info_s *v_cs_info)
+{
+	UNF_REFERNCE_VAR(v_hba);
+	UNF_REFERNCE_VAR(v_cs_info);
+
+	HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
+		   "[warn]hifc_rq_rcv_srv_err not implemented yet");
+
+	if (!v_hba)
+		return UNF_RETURN_ERROR;
+
+	if (!v_cs_info)
+		return UNF_RETURN_ERROR;
+
+	return UNF_RETURN_ERROR;
+}
+
+unsigned int hifc_rcv_els_cmnd(const struct hifc_hba_s *v_hba,
+			       struct unf_frame_pkg_s *v_pkg,
+			       unsigned char *v_pld,
+			       unsigned int pld_len,
+			       int first_frame)
+{
+	unsigned int ret = UNF_RETURN_ERROR;
+
+	/* Convert the Payload to little endian */
+	hifc_big_to_cpu32(v_pld, pld_len);
+
+	v_pkg->type = UNF_PKG_ELS_REQ;
+
+	v_pkg->unf_cmnd_pload_bl.buffer_ptr = v_pld;
+
+	/* Payload length */
+	v_pkg->unf_cmnd_pload_bl.length = pld_len;
+
+	/* Obtain the Cmnd type from the Payload. The Cmnd is in
+	 * little endian
+	 */
+	if (first_frame == UNF_TRUE) {
+		v_pkg->cmnd = UNF_GET_FC_PAYLOAD_ELS_CMND(
+				v_pkg->unf_cmnd_pload_bl.buffer_ptr);
+	}
+
+	/* Errors have been processed in HIFC_RecvElsError */
+	v_pkg->status = UNF_IO_SUCCESS;
+
+	/* Send PKG to the CM layer */
+	UNF_LOWLEVEL_RECEIVE_ELS_PKG(ret, v_hba->lport, v_pkg);
+
+	return ret;
+}
+
+unsigned int hifc_rcv_els_rsp(const struct hifc_hba_s *v_hba,
+			      struct unf_frame_pkg_s *v_pkg,
+			      unsigned int ox_id)
+{
+	unsigned int ret = UNF_RETURN_ERROR;
+
+	/* Receive CmndReqSts */
+	v_pkg->type = UNF_PKG_ELS_REQ_DONE;
+	v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = ox_id;
+	v_pkg->byte_orders |= HIFC_BIT_2;
+
+	/* Mark the last block */
+	v_pkg->last_pkg_flag = UNF_PKG_LAST_RESPONSE;
+
+	/* Send PKG to the CM layer */
+	UNF_LOWLEVEL_RECEIVE_ELS_PKG(ret, v_hba->lport, v_pkg);
+
+	return ret;
+}
+
+unsigned int hifc_rcv_els_rsp_sts(const struct hifc_hba_s *v_hba,
+				  struct unf_frame_pkg_s *v_pkg,
+				  unsigned int rx_id)
+{
+	unsigned int ret = UNF_RETURN_ERROR;
+
+	v_pkg->type = UNF_PKG_ELS_REPLY_DONE;
+	v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = rx_id;
+
+	UNF_LOWLEVEL_SEND_ELS_DONE(ret, v_hba->lport, v_pkg);
+
+	return ret;
+}
+
+unsigned int hifc_rcv_gs_rsp(const struct hifc_hba_s *v_hba,
+			     struct unf_frame_pkg_s *v_pkg,
+			     unsigned int ox_id)
+{
+	unsigned int ret = UNF_RETURN_ERROR;
+
+	/* Receive CmndReqSts */
+	v_pkg->type = UNF_PKG_GS_REQ_DONE;
+	v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = ox_id;
+
+	/* Mark the last block */
+	v_pkg->last_pkg_flag = UNF_PKG_LAST_RESPONSE;
+
+	/* Send PKG to the CM layer */
+	UNF_LOWLEVEL_RECEIVE_GS_PKG(ret, v_hba->lport, v_pkg);
+
+	return ret;
+}
+
+unsigned int hifc_rcv_bls_rsp(const struct hifc_hba_s *v_hba,
+			      struct unf_frame_pkg_s *v_pkg,
+			      unsigned int ox_id)
+{
+	/*
+	 * 1. from SCQ (normal)
+	 * 2. from Root RQ (parent not existing)
+	 *
+	 * single frame, single sequence
+	 */
+	unsigned int ret = UNF_RETURN_ERROR;
+
+	v_pkg->type = UNF_PKG_BLS_REQ_DONE;
+	v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = ox_id;
+	v_pkg->last_pkg_flag = UNF_PKG_LAST_RESPONSE;
+
+	UNF_LOWLEVEL_RECEIVE_BLS_PKG(ret, v_hba->lport, v_pkg);
+
+	return ret;
+}
+
+unsigned int hifc_rcv_tmf_marker_sts(const struct hifc_hba_s *v_hba,
+				     struct unf_frame_pkg_s *v_pkg,
+				     unsigned int ox_id)
+{
+	unsigned int ret = UNF_RETURN_ERROR;
+
+	v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = ox_id;
+
+	/* Send PKG info to COM */
+	UNF_LOWLEVEL_RECEIVE_MARKER_STS(ret, v_hba->lport, v_pkg);
+
+	return ret;
+}
+
+unsigned int hifc_rcv_abts_marker_sts(const struct hifc_hba_s *v_hba,
+				      struct unf_frame_pkg_s *v_pkg,
+				      unsigned int ox_id)
+{
+	unsigned int ret = UNF_RETURN_ERROR;
+
+	v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = ox_id;
+
+	UNF_LOWLEVEL_RECEIVE_ABTS_MARKER_STS(ret, v_hba->lport, v_pkg);
+
+	return ret;
+}
+
+void hifc_scqe_error_pre_process(struct hifc_hba_s *v_hba,
+				 union hifcoe_scqe_u *v_scqe)
+{
+	/* Currently, only printing and statistics collection are performed */
+	HIFC_ERR_IO_STAT(v_hba, HIFC_GET_SCQE_TYPE(v_scqe));
+	HIFC_SCQ_ERR_TYPE_STAT(v_hba, HIFC_GET_SCQE_STATUS(v_scqe));
+
+	HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_ABNORMAL, UNF_WARN,
+		   "[warn]Port(0x%x)-Task_type(%u) SCQE contain error code(%u), additional info(0x%x)",
+		   v_hba->port_cfg.port_id,
+		   v_scqe->common.ch.wd0.task_type,
+		   v_scqe->common.ch.wd0.err_code,
+		   v_scqe->common.conn_id);
+}
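+
+/* Entry for every SCQE popped from an SCQ: pre-process the error code,
+ * dispatch through scqe_handler_table, flag unknown SCQE types, and
+ * reclaim the SQ WQE page when the completion consumed an SQ WQE.
+ */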
+
+unsigned int hifc_rcv_scqe_entry_from_scq(void *v_hba, void *v_scqe,
+					  unsigned int scq_idx)
+{
+	unsigned int ret = UNF_RETURN_ERROR;
+	int do_reclaim = UNF_FALSE;
+	unsigned int index = 0;
+	unsigned int total_index = 0;
+	struct hifc_hba_s *hba = NULL;
+	union hifcoe_scqe_u *scqe = NULL;
+
+	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba,
+			return UNF_RETURN_ERROR);
+	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_scqe,
+			return UNF_RETURN_ERROR);
+	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, HIFC_TOTAL_SCQ_NUM > scq_idx,
+			return UNF_RETURN_ERROR);
+
+	scqe = (union hifcoe_scqe_u *)v_scqe;
+	hba = (struct hifc_hba_s *)v_hba;
+
+	HIFC_IO_STAT(hba, HIFC_GET_SCQE_TYPE(scqe));
+
+	/* 1. error code checking */
+	if (unlikely(HIFC_SCQE_HAS_ERRCODE(scqe))) {
+		/* So far, just print & counter */
+		hifc_scqe_error_pre_process(hba, scqe);
+	}
+
+	/* 2. Process the SCQE with the corresponding handler */
+	total_index = sizeof(scqe_handler_table) /
+		      sizeof(struct unf_scqe_handler_table_s);
+	while (index < total_index) {
+		if (HIFC_GET_SCQE_TYPE(scqe) ==
+		    scqe_handler_table[index].scqe_type) {
+			ret = scqe_handler_table[index].pfn_scqe_handle_fun(
+				hba, scqe);
+			do_reclaim = scqe_handler_table[index].reclaim_sq_wpg;
+
+			break;
+		}
+
+		index++;
+	}
+
+	/* 3. SCQE type check */
+	if (unlikely(index == total_index)) {
+		HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
+			   "[warn]Unknown SCQE type %d",
+			   HIFC_GET_SCQE_TYPE(scqe));
+
+		UNF_PRINT_SFS_LIMIT(UNF_ERR, hba->port_cfg.port_id, scqe,
+				    sizeof(union hifcoe_scqe_u));
+	}
+
+	/* 4. If the SCQE is for an SQ-WQE, reclaim the linked-list SQ
+	 * free page
+	 */
+	if (do_reclaim == UNF_TRUE) {
+		if (HIFC_SCQE_CONN_ID_VALID(scqe)) {
+			ret = hifc_reclaim_sq_wqe_page(v_hba, scqe);
+		} else {
+			/* NOTE: for buffer clear, the SCQE conn_id is 0xFFFF,
+			 * count it on the HBA
+			 */
+			HIFC_HBA_STAT(
+				(struct hifc_hba_s *)v_hba,
+				HIFC_STAT_SQ_IO_BUFFER_CLEARED);
+		}
+	}
+
+	return ret;
+}
+
+static void *hifc_get_els_buf_by_userid(struct hifc_hba_s *v_hba,
+					unsigned short user_id)
+{
+	struct hifc_srq_buff_entry_s *buf_entry = NULL;
+	struct hifc_srq_info_s *srq_info = NULL;
+
+	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_hba, return NULL);
+
+	srq_info = &v_hba->els_srq_info;
+	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE,
+			user_id < srq_info->valid_wqe_num, return NULL);
+
+	buf_entry = &srq_info->els_buff_entry_head[user_id];
+
+	return buf_entry->buff_addr;
+}
+
+static unsigned int hifc_check_srq_buf_valid(struct hifc_hba_s *v_hba,
+					     unsigned int *v_buf_id,
+					     unsigned int v_buf_num)
+{
+	unsigned int index = 0;
+	unsigned int buf_id = 0;
+	void *srq_buf = NULL;
+
+	for (index = 0; index < v_buf_num; index++) {
+		buf_id = v_buf_id[index];
+
+		if (buf_id < v_hba->els_srq_info.valid_wqe_num) {
+			srq_buf = hifc_get_els_buf_by_userid(
+					v_hba,
+					(unsigned short)buf_id);
+		} else {
+			srq_buf = NULL;
+		}
+
+		if (!srq_buf) {
+			HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR,
+				   UNF_LOG_LOGIN_ATT, UNF_ERR,
+				   "[err]Port(0x%x) get srq buffer user id(0x%x) is null",
+				   v_hba->port_cfg.port_id, buf_id);
+
+			return UNF_RETURN_ERROR;
+		}
+	}
+
+	return RETURN_OK;
+}
+
+static void hifc_reclaim_srq_buff(struct hifc_hba_s *v_hba,
+				  unsigned int *v_buf_id,
+				  unsigned int v_buf_num)
+{
+	unsigned int index = 0;
+	unsigned int buf_id = 0;
+	void *srq_buf = NULL;
+
+	for (index = 0; index < v_buf_num; index++) {
+		buf_id = v_buf_id[index];
+		if (buf_id < v_hba->els_srq_info.valid_wqe_num) {
+			srq_buf = hifc_get_els_buf_by_userid(
+					v_hba,
+					(unsigned short)buf_id);
+		} else {
+			srq_buf = NULL;
+		}
+
+		/* A NULL buffer means the buffer id is invalid;
+		 * exit directly
+		 */
+		if (!srq_buf)
+			break;
+
+		hifc_post_els_srq_wqe(&v_hba->els_srq_info,
+				      (unsigned short)buf_id);
+	}
+}
+
+static unsigned int hifc_check_els_gs_valid(struct hifc_hba_s *v_hba,
+					    union hifcoe_scqe_u *v_scqe,
+					    struct unf_frame_pkg_s *v_pkg,
+					    unsigned int *v_buf_id,
+					    unsigned int buf_num,
+					    unsigned int frame_len)
+{
+	unsigned int ox_id = INVALID_VALUE32;
+
+	ox_id = v_pkg->frame_head.oxid_rxid >> 16;
+
+	/* If the SCQE carries an error code or the frame is malformed,
+	 * discard it directly
+	 */
+	if ((sizeof(struct hifc_fc_frame_header) > frame_len) ||
+	    (HIFC_SCQE_HAS_ERRCODE(v_scqe)) ||
+	    (buf_num > HIFC_ELS_SRQ_BUF_NUM)) {
+		HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO,
+			   UNF_LOG_LOGIN_ATT, UNF_KEVENT,
+			   "[event]Port(0x%x) get scqe type(0x%x) payload len(0x%x), scq status(0x%x), user id num(0x%x) abnormal",
+			   v_hba->port_cfg.port_id,
+			   HIFC_GET_SCQE_TYPE(v_scqe),
+			   frame_len,
+			   HIFC_GET_SCQE_STATUS(v_scqe),
+			   buf_num);
+
+		/* ELS RSP Special Processing */
+		if (HIFC_GET_SCQE_TYPE(v_scqe) == HIFC_SCQE_ELS_RSP) {
+			if (HIFC_SCQE_ERR_TO_CM(v_scqe)) {
+				v_pkg->status = UNF_IO_FAILED;
+				(void)hifc_rcv_els_rsp(v_hba, v_pkg, ox_id);
+			} else {
+				HIFC_HBA_STAT(v_hba,
+					      HIFC_STAT_ELS_RSP_EXCH_REUSE);
+			}
+		}
+
+		/* GS RSP Special Processing */
+		if (HIFC_GET_SCQE_TYPE(v_scqe) == HIFC_SCQE_GS_RSP) {
+			if (HIFC_SCQE_ERR_TO_CM(v_scqe)) {
+				v_pkg->status = UNF_IO_FAILED;
+				(void)hifc_rcv_gs_rsp(v_hba, v_pkg, ox_id);
+			} else {
+				HIFC_HBA_STAT(v_hba,
+					      HIFC_STAT_GS_RSP_EXCH_REUSE);
+			}
+		}
+
+		/* Reclaim srq */
+		if (buf_num <= HIFC_ELS_SRQ_BUF_NUM)
+			hifc_reclaim_srq_buff(v_hba, v_buf_id, buf_num);
+
+		return UNF_RETURN_ERROR;
+	}
+
+	/* For ELS CMD, check the validity of the buffers sent by the ucode */
+	if (HIFC_GET_SCQE_TYPE(v_scqe) == HIFC_SCQE_ELS_CMND) {
+		if (hifc_check_srq_buf_valid(v_hba, v_buf_id, buf_num) !=
+		    RETURN_OK) {
+			HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR,
+				   UNF_LOG_LOGIN_ATT, UNF_ERR,
+				   "[err]Port(0x%x) get els cmnd scqe user id num(0x%x) abnormal, as some srq buff is null",
+				   v_hba->port_cfg.port_id, buf_num);
+
+			hifc_reclaim_srq_buff(v_hba, v_buf_id, buf_num);
+
+			return UNF_RETURN_ERROR;
+		}
+	}
+
+	return RETURN_OK;
+}
If an exception occurs,
+	 * discard the error code
+	 */
+	ret = hifc_check_els_gs_valid(v_hba, v_scqe, &pkg, els_cmd->user_id,
+				      max_buf_num, frame_len);
+	if (ret != RETURN_OK)
+		return RETURN_OK;
+
+	/* Send data to COM cyclically */
+	for (index = 0; index < max_buf_num; index++) {
+		/* Exception record, which is not processed currently */
+		if (rcv_data_len >= frame_len) {
+			HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR,
+				   UNF_LOG_LOGIN_ATT, UNF_ERR,
+				   "[err]Port(0x%x) get els cmd data len(0x%x) is bigger than frame len(0x%x)",
+				   v_hba->port_cfg.port_id,
+				   rcv_data_len, frame_len);
+		}
+
+		buf_id = (unsigned short)els_cmd->user_id[index];
+		els_buf = hifc_get_els_buf_by_userid(v_hba, buf_id);
+
+		/* Obtain payload address */
+		pld = (unsigned char *)(els_buf);
+		hdr_len = 0;
+		first_frame = UNF_FALSE;
+		if (index == 0) {
+			els_frame = (struct hifc_fc_frame_header *)els_buf;
+			pld = (unsigned char *)(els_frame + 1);
+
+			hdr_len = sizeof(struct hifc_fc_frame_header);
+			first_frame = UNF_TRUE;
+
+			memcpy(&local_fc_frame, els_frame,
+			       sizeof(struct hifc_fc_frame_header));
+			hifc_big_to_cpu32(&local_fc_frame,
+					  sizeof(struct hifc_fc_frame_header));
+			memcpy(&pkg.frame_head, &local_fc_frame,
+			       sizeof(pkg.frame_head));
+		}
+
+		/* Calculate the payload length */
+		pkg.last_pkg_flag = 0;
+		pld_len = HIFC_SRQ_ELS_SGE_LEN;
+
+		if ((rcv_data_len + HIFC_SRQ_ELS_SGE_LEN) >= frame_len) {
+			pkg.last_pkg_flag = 1;
+			pld_len = frame_len - rcv_data_len;
+
+			if (unlikely(
+				(v_hba->active_topo == UNF_TOP_P2P_MASK) &&
+				(v_hba->delay_info.root_rq_rcvd_flag == 0))) {
+				/* Only data is pushed for the first time, but
+				 * the last packet flag is not set
+				 */
+				pkg.last_pkg_flag = 0;
+				srq_delay_flag = 1;
+
+				HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR,
+					   UNF_LOG_LOGIN_ATT, UNF_WARN,
+					   "[warn]Port(0x%x) rcvd els from srq, needs delayed processing, topo(0x%x)",
+					   v_hba->port_cfg.port_id,
+					   v_hba->active_topo);
+			}
+		}
+
+		/* Push data to COM */
+		if (ret == RETURN_OK) {
+			ret = hifc_rcv_els_cmnd(v_hba, &pkg, pld,
+						(pld_len - hdr_len),
+						first_frame);
+
+			/* If the PLOGI arrives before the FLOGI, the pkg is
+			 * saved, and the last packet is pushed
+			 * when the root rq contains content.
+			 */
+			if (unlikely(srq_delay_flag == 1)) {
+				spin_lock_irqsave(&v_hba->delay_info.srq_lock,
+						  flags);
+				memcpy(&v_hba->delay_info.pkg, &pkg,
+				       sizeof(pkg));
+				v_hba->delay_info.srq_delay_flag = 1;
+				v_hba->delay_info.pkg.last_pkg_flag = 1;
+
+				/* Add a 20-ms timer as a fallback so the
+				 * saved pkg is still pushed if the root rq
+				 * never delivers data
+				 */
+				(void)queue_delayed_work(
+					v_hba->work_queue,
+					&v_hba->delay_info.del_work,
+					(unsigned long)
+					msecs_to_jiffies((unsigned int)
+					HIFC_SRQ_PROCESS_DELAY_MS));
+
+				spin_unlock_irqrestore(
+					&v_hba->delay_info.srq_lock, flags);
+			}
+		}
+
+		/* Reclaim srq buffer */
+		hifc_post_els_srq_wqe(&v_hba->els_srq_info, buf_id);
+
+		rcv_data_len += pld_len;
+	}
+
+	HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
+		   "[info]Port(0x%x) recv ELS Type(0x%x) Cmnd(0x%x) OXID(0x%x) RXID(0x%x) SID(0x%x) DID(0x%x) %u",
+		   v_hba->port_cfg.port_id,
+		   pkg.type,
+		   pkg.cmnd,
+		   els_cmd->wd2.ox_id,
+		   els_cmd->wd2.rx_id,
+		   els_cmd->wd1.sid,
+		   els_cmd->wd0.did,
+		   ret);
+
+	return ret;
+}
+
+static unsigned int hifc_get_els_gs_pld_len(struct hifc_hba_s *v_hba,
+					    unsigned int v_rcv_data_len,
+					    unsigned int v_frame_len)
+{
+	unsigned int pld_len;
+
+	/* Exception record, which is not processed currently */
+	if (v_rcv_data_len >= v_frame_len) {
+		HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
+			   "[err]Port(0x%x) get els rsp data len(0x%x) is bigger than frame len(0x%x)",
+			   v_hba->port_cfg.port_id,
+			   v_rcv_data_len, v_frame_len);
+	}
+
+	pld_len = HIFC_SRQ_ELS_SGE_LEN;
+	if ((v_rcv_data_len + HIFC_SRQ_ELS_SGE_LEN) >= v_frame_len)
+		pld_len = v_frame_len - v_rcv_data_len;
+
+	return pld_len;
+}
+
+static unsigned int hifc_scq_rcv_els_rsp(struct hifc_hba_s *v_hba,
+					 union hifcoe_scqe_u *v_scqe)
+{
+	unsigned int ret = RETURN_OK;
+	unsigned int pld_len = 0;
+	unsigned int hdr_len = 0;
+	unsigned int frame_len = 0;
+	unsigned int rcv_data_len = 0;
+	unsigned int max_buf_num = 0;
+	unsigned short buf_id = 0;
+	unsigned int index = 0;
+	unsigned int ox_id = (~0);
+	struct unf_frame_pkg_s pkg = { 0 };
+	struct hifcoe_scqe_rcv_els_gs_rsp_s *els_rsp;
+	struct hifc_fc_frame_header *els_frame = NULL;
+	void *els_buf = NULL;
+	unsigned char *pld = NULL;
+
+	els_rsp = &v_scqe->rcv_els_gs_rsp;
+	frame_len = els_rsp->wd2.data_len;
+	max_buf_num = els_rsp->wd4.user_id_num;
+
+	ox_id = (unsigned int)(els_rsp->wd1.ox_id) - v_hba->exit_base;
+	pkg.frame_head.oxid_rxid = (unsigned int)(els_rsp->wd1.rx_id) |
+				   ox_id << 16;
+	pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME] = els_rsp->magic_num;
+	pkg.frame_head.csctl_sid = els_rsp->wd4.sid;
+	pkg.frame_head.rctl_did = els_rsp->wd3.did;
+	pkg.status = UNF_IO_SUCCESS;
+
+	/* Handle the exception first. The ELS RSP returns the error code;
+	 * only the OX_ID is available to report it to the CM layer.
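+	 * The OX_ID/RX_ID pair is packed above as
+	 * oxid_rxid = (ox_id << 16) | rx_id; e.g. ox_id 0x0012 (after
+	 * subtracting exit_base) and rx_id 0x0034 yield oxid_rxid 0x00120034.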
+	 */
+	ret = hifc_check_els_gs_valid(v_hba, v_scqe, &pkg,
+				      els_rsp->user_id, max_buf_num, frame_len);
+	if (ret != RETURN_OK)
+		return RETURN_OK;
+
+	/* if this is an echo rsp */
+	if (els_rsp->wd3.echo_rsp == UNF_TRUE) {
+		/* the echo time stamps are carried in user_id[5..8] of the
+		 * ELS rsp
+		 */
+		pkg.private[PKG_PRIVATE_ECHO_CMD_RCV_TIME] =
+			els_rsp->user_id[5];
+		pkg.private[PKG_PRIVATE_ECHO_RSP_SND_TIME] =
+			els_rsp->user_id[6];
+		pkg.private[PKG_PRIVATE_ECHO_CMD_SND_TIME] =
+			els_rsp->user_id[7];
+		pkg.private[PKG_PRIVATE_ECHO_ACC_RCV_TIME] =
+			els_rsp->user_id[8];
+	}
+
+	/* Send data to COM cyclically */
+	for (index = 0; index < max_buf_num; index++) {
+		/* Obtain buffer address */
+		els_buf = NULL;
+		buf_id = (unsigned short)els_rsp->user_id[index];
+
+		els_buf = hifc_get_els_buf_by_userid(v_hba, buf_id);
+
+		/* If the buffer is NULL, the buff id is abnormal; exit
+		 * directly
+		 */
+		if (unlikely(!els_buf)) {
+			HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR,
+				   UNF_LOG_LOGIN_ATT, UNF_ERR,
+				   "[err]Port(0x%x) OXID(0x%x) RXID(0x%x) SID(0x%x) DID(0x%x) Index(0x%x) get els rsp buff user id(0x%x) abnormal",
+				   v_hba->port_cfg.port_id, ox_id,
+				   els_rsp->wd1.rx_id, els_rsp->wd4.sid,
+				   els_rsp->wd3.did, index, buf_id);
+
+			if (index == 0) {
+				pkg.status = UNF_IO_FAILED;
+				ret = hifc_rcv_els_rsp(v_hba, &pkg, ox_id);
+			}
+
+			return ret;
+		}
+
+		hdr_len = 0;
+		pld = (unsigned char *)(els_buf);
+		if (index == 0) {
+			hdr_len = sizeof(struct hifc_fc_frame_header);
+
+			els_frame = (struct hifc_fc_frame_header *)els_buf;
+			pld = (unsigned char *)(els_frame + 1);
+		}
+
+		/* Calculate the payload length */
+		pld_len = hifc_get_els_gs_pld_len(v_hba, rcv_data_len,
+						  frame_len);
+
+		/* Push data to COM */
+		if (ret == RETURN_OK) {
+			ret = hifc_recv_els_rsp_payload(v_hba, &pkg, ox_id, pld,
+							(pld_len - hdr_len));
+		}
+
+		/* Reclaim srq buffer */
+		hifc_post_els_srq_wqe(&v_hba->els_srq_info, buf_id);
+
+		rcv_data_len += pld_len;
+	}
+
+	if ((els_rsp->wd3.end_rsp) && (ret == RETURN_OK))
+		ret = hifc_rcv_els_rsp(v_hba, &pkg, ox_id);
+
+	HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
+		   "[info]Port(0x%x) receive ELS RSP OXID(0x%x) RXID(0x%x) SID(0x%x) DID(0x%x) end_rsp(0x%x) user_num(0x%x)",
+		   v_hba->port_cfg.port_id,
+		   ox_id,
+		   els_rsp->wd1.rx_id,
+		   els_rsp->wd4.sid,
+		   els_rsp->wd3.did,
+		   els_rsp->wd3.end_rsp,
+		   els_rsp->wd4.user_id_num);
+
+	return ret;
+}
+
+static unsigned int hifc_scq_rcv_gs_rsp(struct hifc_hba_s *v_hba,
+					union hifcoe_scqe_u *v_scqe)
+{
+	unsigned int ret = RETURN_OK;
+	unsigned int pld_len = 0;
+	unsigned int hdr_len = 0;
+	unsigned int frame_len = 0;
+	unsigned int rcv_data_len = 0;
+	unsigned int max_buf_num = 0;
+	unsigned short buf_id = 0;
+	unsigned int index = 0;
+	unsigned int ox_id = (~0);
+	struct unf_frame_pkg_s pkg = { 0 };
+	struct hifcoe_scqe_rcv_els_gs_rsp_s *gs_rsp = NULL;
+	struct hifc_fc_frame_header *gs_frame = NULL;
+	void *gs_buf = NULL;
+	unsigned char *pld = NULL;
+
+	gs_rsp = &v_scqe->rcv_els_gs_rsp;
+	frame_len = gs_rsp->wd2.data_len;
+	max_buf_num = gs_rsp->wd4.user_id_num;
+
+	ox_id = (unsigned int)(gs_rsp->wd1.ox_id) - v_hba->exit_base;
+	pkg.frame_head.oxid_rxid = (unsigned int)(gs_rsp->wd1.rx_id) |
+				   ox_id << 16;
+	pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME] = gs_rsp->magic_num;
+	pkg.frame_head.csctl_sid = gs_rsp->wd4.sid;
+	pkg.frame_head.rctl_did = gs_rsp->wd3.did;
+	pkg.status = UNF_IO_SUCCESS;
+
+	if (gs_rsp->wd3.end_rsp)
+		HIFC_HBA_STAT(v_hba, HIFC_STAT_LAST_GS_SCQE);
+
+	/* Exception handling: The GS RSP returns an error code.
Only the OX_ID
+	 * is available to report the error code to the CM layer
+	 */
+	ret = hifc_check_els_gs_valid(v_hba, v_scqe, &pkg, gs_rsp->user_id,
+				      max_buf_num, frame_len);
+	if (ret != RETURN_OK)
+		return RETURN_OK;
+
+	/* Send data to COM cyclically */
+	for (index = 0; index < max_buf_num; index++) {
+		/* Obtain buffer address */
+		gs_buf = NULL;
+		buf_id = (unsigned short)gs_rsp->user_id[index];
+
+		gs_buf = hifc_get_els_buf_by_userid(v_hba, buf_id);
+
+		if (unlikely(!gs_buf)) {
+			HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR,
+				   UNF_LOG_LOGIN_ATT, UNF_ERR,
+				   "[err]Port(0x%x) OXID(0x%x) RXID(0x%x) SID(0x%x) DID(0x%x) Index(0x%x) get gs rsp scqe user id(0x%x) abnormal",
+				   v_hba->port_cfg.port_id, ox_id,
+				   gs_rsp->wd1.rx_id, gs_rsp->wd4.sid,
+				   gs_rsp->wd3.did, index, buf_id);
+
+			if (index == 0) {
+				pkg.status = UNF_IO_FAILED;
+				ret = hifc_rcv_gs_rsp(v_hba, &pkg, ox_id);
+			}
+
+			return ret;
+		}
+
+		/* Obtain payload address */
+		hdr_len = 0;
+		pld = (unsigned char *)(gs_buf);
+		if (index == 0) {
+			hdr_len = sizeof(struct hifc_fc_frame_header);
+
+			gs_frame = (struct hifc_fc_frame_header *)gs_buf;
+			pld = (unsigned char *)(gs_frame + 1);
+		}
+
+		/* Calculate the payload length */
+		pld_len = hifc_get_els_gs_pld_len(v_hba, rcv_data_len,
+						  frame_len);
+
+		/* Push data to COM */
+		if (ret == RETURN_OK)
+			ret = hifc_rcv_gs_rsp_payload(v_hba, &pkg, ox_id, pld,
+						      (pld_len - hdr_len));
+
+		/* Reclaim srq buffer */
+		hifc_post_els_srq_wqe(&v_hba->els_srq_info, buf_id);
+
+		rcv_data_len += pld_len;
+	}
+
+	if ((gs_rsp->wd3.end_rsp) && (ret == RETURN_OK))
+		ret = hifc_rcv_gs_rsp(v_hba, &pkg, ox_id);
+
+	HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
+		   "[info]Port(0x%x) recv GS RSP OXID(0x%x) RXID(0x%x) SID(0x%x) DID(0x%x) end_rsp(0x%x) user_num(0x%x)",
+		   v_hba->port_cfg.port_id,
+		   ox_id,
+		   gs_rsp->wd1.rx_id,
+		   gs_rsp->wd4.sid,
+		   gs_rsp->wd3.did,
+		   gs_rsp->wd3.end_rsp,
+		   gs_rsp->wd4.user_id_num);
+
+	return ret;
+}
+
+static unsigned int hifc_scq_rcv_els_rsp_sts(struct hifc_hba_s *v_hba,
+					     union hifcoe_scqe_u *v_scqe)
+{
+	unsigned int ret = UNF_RETURN_ERROR;
+	unsigned int rx_id = INVALID_VALUE32;
+	struct unf_frame_pkg_s pkg = { 0 };
+	struct hifcoe_scqe_comm_rsp_sts_s *els_rsp_sts = NULL;
+
+	els_rsp_sts = &v_scqe->comm_sts;
+	rx_id = (unsigned int)els_rsp_sts->wd0.rx_id;
+	rx_id = rx_id - v_hba->exit_base;
+
+	pkg.private[PKG_PRIVATE_XCHG_ALLOC_TIME] = els_rsp_sts->magic_num;
+	pkg.frame_head.oxid_rxid = rx_id |
+				   (unsigned int)(els_rsp_sts->wd0.ox_id) << 16;
+
+	if (unlikely(HIFC_SCQE_HAS_ERRCODE(v_scqe)))
+		pkg.status = UNF_IO_FAILED;
+	else
+		pkg.status = UNF_IO_SUCCESS;
+
+	ret = hifc_rcv_els_rsp_sts(v_hba, &pkg, rx_id);
+
+	return ret;
+}
+
+static unsigned int hifc_check_rport_is_valid(
+			const struct hifc_parent_queue_info_s *v_prntq_info,
+			unsigned int scqe_xid)
+{
+	if (v_prntq_info->parent_ctx.cqm_parent_ctx_obj) {
+		if ((v_prntq_info->parent_sq_info.context_id &
+		    HIFC_CQM_XID_MASK) == (scqe_xid & HIFC_CQM_XID_MASK))
+			return RETURN_OK;
+	}
+
+	return UNF_RETURN_ERROR;
+}
+
+static unsigned int hifc_scq_rcv_offload_sts(struct hifc_hba_s *v_hba,
+					     union hifcoe_scqe_u *v_scqe)
+{
+	unsigned int rport_valid = UNF_RETURN_ERROR;
+	unsigned int rport_index = 0;
+	unsigned int cache_id = 0;
+	unsigned int local_ctx_id = 0;
+	unsigned long flag = 0;
+	struct hifc_parent_queue_info_s *prnt_qinfo = NULL;
+	struct hifcoe_scqe_sess_sts_s *offload_sts = NULL;
+	struct hifc_destroy_ctrl_info_s destroy_sqe_info = { 0 };
+
+	offload_sts = &v_scqe->sess_sts;
+	rport_index = offload_sts->wd1.conn_id;
+
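+	/* NOTE: wd1.conn_id selects the per-session parent queue slot; it is
+	 * range-checked against UNF_HIFC_MAXRPORT_NUM below before
+	 * parent_queues[] is indexed.
+	 */
+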
cache_id = offload_sts->wd2.cid; + local_ctx_id = offload_sts->wd0.xid_qpn; + + if (rport_index >= UNF_HIFC_MAXRPORT_NUM) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) receive an error offload status: rport index(0x%x) is invalid, cache id(0x%x)", + v_hba->port_cfg.port_id, rport_index, cache_id); + + return UNF_RETURN_ERROR; + } + + prnt_qinfo = &v_hba->parent_queue_mgr->parent_queues[rport_index]; + + rport_valid = hifc_check_rport_is_valid(prnt_qinfo, local_ctx_id); + if (rport_valid != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) receive an error offload status: rport index(0x%x), context id(0x%x) is invalid", + v_hba->port_cfg.port_id, rport_index, local_ctx_id); + + return UNF_RETURN_ERROR; + } + + /* off_load failed */ + if (HIFC_GET_SCQE_STATUS(v_scqe) != HIFC_COMPLETION_STATUS_SUCCESS) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x), rport(0x%x), context id(0x%x), cache id(0x%x), offload failed", + v_hba->port_cfg.port_id, rport_index, + local_ctx_id, cache_id); + + return UNF_RETURN_ERROR; + } + + spin_lock_irqsave(&prnt_qinfo->parent_queue_state_lock, flag); + prnt_qinfo->parent_sq_info.cache_id = cache_id; + prnt_qinfo->offload_state = HIFC_QUEUE_STATE_OFFLOADED; + atomic_set(&prnt_qinfo->parent_sq_info.sq_cashed, UNF_TRUE); + + if (prnt_qinfo->parent_sq_info.destroy_sqe.valid == UNF_TRUE) { + destroy_sqe_info.valid = + prnt_qinfo->parent_sq_info.destroy_sqe.valid; + + destroy_sqe_info.rport_index = + prnt_qinfo->parent_sq_info.destroy_sqe.rport_index; + + destroy_sqe_info.time_out = + prnt_qinfo->parent_sq_info.destroy_sqe.time_out; + + destroy_sqe_info.start_jiff = + prnt_qinfo->parent_sq_info.destroy_sqe.start_jiff; + + destroy_sqe_info.rport_info.nport_id = + prnt_qinfo->parent_sq_info.destroy_sqe.rport_info.nport_id; + destroy_sqe_info.rport_info.rport_index = + prnt_qinfo->parent_sq_info.destroy_sqe.rport_info.rport_index; + destroy_sqe_info.rport_info.port_name = + prnt_qinfo->parent_sq_info.destroy_sqe.rport_info.port_name; + + prnt_qinfo->parent_sq_info.destroy_sqe.valid = UNF_FALSE; + } + + spin_unlock_irqrestore(&prnt_qinfo->parent_queue_state_lock, flag); + + hifc_pop_destroy_parent_queue_sqe((void *)v_hba, &destroy_sqe_info); + + return RETURN_OK; +} + +unsigned int hifc_get_gs_req_and_rsp_pld_len(unsigned short cmd_code, + unsigned int *v_gs_pld_len, + unsigned int *v_gs_rsp_pld_len) +{ + UNF_CHECK_VALID(0x4917, UNF_TRUE, v_gs_pld_len, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x4917, UNF_TRUE, v_gs_rsp_pld_len, + return UNF_RETURN_ERROR); + + switch (cmd_code) { + case NS_GPN_ID: + *v_gs_pld_len = UNF_GPNID_PAYLOAD_LEN; + *v_gs_rsp_pld_len = UNF_GPNID_RSP_PAYLOAD_LEN; + break; + + case NS_GNN_ID: + *v_gs_pld_len = UNF_GNNID_PAYLOAD_LEN; + *v_gs_rsp_pld_len = UNF_GNNID_RSP_PAYLOAD_LEN; + break; + + case NS_GFF_ID: + *v_gs_pld_len = UNF_GFFID_PAYLOAD_LEN; + *v_gs_rsp_pld_len = UNF_GFFID_RSP_PAYLOAD_LEN; + break; + + case NS_GID_FT: + case NS_GID_PT: + *v_gs_pld_len = UNF_GID_PAYLOAD_LEN; + *v_gs_rsp_pld_len = UNF_GID_ACC_PAYLOAD_LEN; + break; + + case NS_RFT_ID: + *v_gs_pld_len = UNF_RFTID_PAYLOAD_LEN; + *v_gs_rsp_pld_len = UNF_RFTID_RSP_PAYLOAD_LEN; + break; + + case NS_RFF_ID: + *v_gs_pld_len = UNF_RFFID_PAYLOAD_LEN; + *v_gs_rsp_pld_len = UNF_RFFID_RSP_PAYLOAD_LEN; + break; + case NS_GA_NXT: + *v_gs_pld_len = UNF_GID_PAYLOAD_LEN; + *v_gs_rsp_pld_len = UNF_GID_ACC_PAYLOAD_LEN; + break; + + case NS_GIEL: + *v_gs_pld_len = 
UNF_RFTID_RSP_PAYLOAD_LEN; + *v_gs_rsp_pld_len = UNF_GID_ACC_PAYLOAD_LEN; + break; + + default: + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Unknown GS commond type(0x%x)", cmd_code); + return UNF_RETURN_ERROR; + } + + return RETURN_OK; +} + +static unsigned int hifc_send_gs_via_parent(void *v_hba, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned short ox_id, rx_id; + unsigned short cmd_code = UNF_ZERO; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int gs_pld_len = UNF_ZERO; + unsigned int gs_rsp_pld_len = UNF_ZERO; + void *gs_pld_addr = NULL; + struct hifc_hba_s *hba = NULL; + struct hifc_parent_sq_info_s *sq_info; + struct hifcoe_sqe_s sqe; + unsigned long long fram_phy_addr; + + hba = (struct hifc_hba_s *)v_hba; + + memset(&sqe, 0, sizeof(struct hifcoe_sqe_s)); + + sq_info = hifc_find_parent_sq_by_pkg(hba, v_pkg); + if (!sq_info) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Get NULL parent SQ information"); + + return ret; + } + + cmd_code = HIFC_GET_GS_CMND_CODE(v_pkg->cmnd); + + ret = hifc_get_gs_req_and_rsp_pld_len(cmd_code, &gs_pld_len, + &gs_rsp_pld_len); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) send GS SID(0x%x) DID(0x%x), get error GS request and response payload length", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return ret; + } + + gs_pld_addr = (void *)(HIFC_GET_CMND_PAYLOAD_ADDR(v_pkg)); + fram_phy_addr = v_pkg->unf_cmnd_pload_bl.buf_dma_addr + + sizeof(struct unf_fchead_s); + + if (cmd_code == NS_GID_FT || cmd_code == NS_GID_PT) + gs_pld_addr = (void *)(UNF_GET_GID_PAYLOAD(v_pkg)); + + /* Assemble the SQE Control Section part */ + hifc_build_service_wqe_ctrl_section( + &sqe.ctrl_sl, + HIFC_BYTES_TO_QW_NUM(HIFC_SQE_TS_SIZE), + HIFC_BYTES_TO_QW_NUM(sizeof(struct hifcoe_variable_sge_s))); + /* Assemble the SQE Task Section part */ + ox_id = UNF_GET_OXID(v_pkg) + hba->exit_base; + rx_id = UNF_GET_RXID(v_pkg); + hifc_build_service_wqe_ts_common(&sqe.ts_sl, + sq_info->rport_index, ox_id, + rx_id, HIFC_LSW(gs_pld_len)); + hifc_build_gs_wqe_ts_req(&sqe, UNF_GETXCHGALLOCTIME(v_pkg)); + + hifc_build_els_gs_wqe_sge(&sqe, gs_pld_addr, fram_phy_addr, gs_pld_len, + sq_info->context_id, v_hba); + + ret = hifc_parent_sq_enqueue(sq_info, &sqe); + + return ret; +} + +unsigned int hifc_send_gs_cmnd(void *v_hba, struct unf_frame_pkg_s *v_pkg) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct hifc_hba_s *hba = NULL; + struct hifc_parent_queue_info_s *prnt_qinfo = NULL; + + UNF_CHECK_VALID(0x4913, UNF_TRUE, v_hba, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x4914, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x4915, UNF_TRUE, UNF_GET_SFS_ENTRY(v_pkg), + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x4916, UNF_TRUE, HIFC_GET_CMND_PAYLOAD_ADDR(v_pkg), + return UNF_RETURN_ERROR); + + HIFC_CHECK_PKG_ALLOCTIME(v_pkg); + + hba = (struct hifc_hba_s *)v_hba; + prnt_qinfo = hifc_find_parent_queue_info_by_pkg(hba, v_pkg); + + if (!prnt_qinfo) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) send GS SID(0x%x) DID(0x%x), get a null parent queue information", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return ret; + } + + if (HIFC_RPORT_NOT_OFFLOADED(prnt_qinfo)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]Port(0x%x) send GS SID(0x%x) DID(0x%x), send GS Request before PLOGI", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + 
v_pkg->frame_head.rctl_did);
+
+		return ret;
+	}
+
+	ret = hifc_send_gs_via_parent(v_hba, v_pkg);
+
+	return ret;
+}
+
+static unsigned int hifc_get_bls_pld_len(struct unf_frame_pkg_s *v_pkg,
+					 unsigned int *v_frame_len)
+{
+	unsigned int ret = RETURN_OK;
+	unsigned int rctl = 0;
+
+	UNF_CHECK_VALID(0x4917, UNF_TRUE, v_frame_len, return UNF_RETURN_ERROR);
+
+	rctl = UNF_GET_FC_HEADER_RCTL(&v_pkg->frame_head);
+	if (rctl == HIFC_RCTL_BLS_ACC) {
+		/* BA_ACC */
+		*v_frame_len = sizeof(struct unf_ba_acc_s);
+	} else if (rctl == HIFC_RCTL_BLS_RJT) {
+		/* BA_RJT */
+		*v_frame_len = sizeof(struct unf_ba_rjt_s);
+	} else {
+		UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
+			  "[warn]PKG Rctl(0x%x) not BLS ACC or RJT", rctl);
+
+		*v_frame_len = 0;
+		ret = UNF_RETURN_ERROR;
+	}
+
+	return ret;
+}
+
+static unsigned int hifc_send_bls_via_cmdq(struct hifc_hba_s *v_hba,
+					   struct unf_frame_pkg_s *v_pkg)
+{
+	unsigned int ret = UNF_RETURN_ERROR;
+	unsigned int rctl = 0;
+	unsigned int bls_pld_len = 0;
+	unsigned short rx_id = INVALID_VALUE16;
+	unsigned short ox_id = INVALID_VALUE16;
+	unsigned short exch_id = INVALID_VALUE16;
+	unsigned char *bls_pld_addr = NULL;
+	union hifc_cmdqe_u cmdqe;
+	struct hifc_parent_sq_info_s *sq_info = NULL;
+
+	sq_info = hifc_find_parent_sq_by_pkg(v_hba, v_pkg);
+	if (!sq_info) {
+		HIFC_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
+			   "[warn]Port(0x%x) send BLS SID_DID(0x%x_0x%x) with null parent queue information",
+			   v_hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid,
+			   v_pkg->frame_head.rctl_did);
+
+		return UNF_RETURN_ERROR;
+	}
+
+	/* Determine whether the response is ACC or RJT and obtain the payload
+	 * length of the ABTS_RSP
+	 */
+	ret = hifc_get_bls_pld_len(v_pkg, &bls_pld_len);
+	if (ret != RETURN_OK) {
+		UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
+			  "[err]Port(0x%x) cmdq send BLS PKG DID(0x%x) failed",
+			  v_hba->port_index, v_pkg->frame_head.rctl_did);
+
+		return UNF_RETURN_ERROR;
+	}
+
+	rctl = UNF_GET_FC_HEADER_RCTL(&v_pkg->frame_head);
+	exch_id = (v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]) & 0xffff;
+	if ((exch_id == INVALID_VALUE16) && (rctl == HIFC_RCTL_BLS_ACC)) {
+		UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
+			  "[err]Port(0x%x) cmdq send BA_ACC with error RXID(0xffff)",
+			  v_hba->port_index);
+
+		return UNF_RETURN_ERROR;
+	}
+
+	/*
+	 * FC-FS-3 15.3.3.1 Description:
+	 * The OX_ID and RX_ID shall be set to match the Exchange in which
+	 * the ABTS frame was transmitted.
+	 */
+	rx_id = UNF_GET_FC_HEADER_RXID(&v_pkg->frame_head);
+	ox_id = UNF_GET_FC_HEADER_OXID(&v_pkg->frame_head);
+
+	if (exch_id != INVALID_VALUE16) {
+		exch_id = exch_id + v_hba->exit_base;
+	} else {
+		/* exch_id stays invalid: no RX_ID was ever assigned to the
+		 * CM, which can happen when the CM answers with a BA_RJT
+		 */
+	}
+
+	memset(&cmdqe, 0, sizeof(cmdqe));
+	hifc_build_cmdqe_common(&cmdqe, HIFC_CMDQE_ABTS_RSP, exch_id);
+	cmdqe.snd_abts_rsp.wd1.ox_id = ox_id;
+	cmdqe.snd_abts_rsp.wd1.port_id = v_hba->port_index;
+	cmdqe.snd_abts_rsp.wd1.payload_len = bls_pld_len;
+	cmdqe.snd_abts_rsp.wd1.rsp_type = ((rctl == HIFC_RCTL_BLS_ACC) ?
0 : 1); + cmdqe.snd_abts_rsp.wd2.conn_id = sq_info->rport_index; + cmdqe.snd_abts_rsp.wd2.scqn = hifc_get_rport_maped_sts_scqn(v_hba, + sq_info->rport_index); + cmdqe.snd_abts_rsp.wd3.xid = sq_info->context_id; + cmdqe.snd_abts_rsp.wd4.cid = sq_info->cache_id; + cmdqe.snd_abts_rsp.wd5.req_rx_id = rx_id; + bls_pld_addr = HIFC_GET_RSP_PAYLOAD_ADDR(v_pkg); + memcpy(cmdqe.snd_abts_rsp.payload, bls_pld_addr, bls_pld_len); + + /* Send the ABTS_RSP command via ROOT CMDQ. */ + ret = hifc_root_cmdq_enqueue(v_hba, &cmdqe, sizeof(cmdqe.snd_abts_rsp)); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) RPort(0x%x) send ABTS_RSP OXID(0x%x) RXID(0x%x) EXCHID(0x%x)", + v_hba->port_cfg.port_id, sq_info->rport_index, ox_id, + rx_id, exch_id); + + return ret; +} + +static unsigned int hifc_send_bls_via_parent(struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = INVALID_VALUE16; + unsigned short rx_id = INVALID_VALUE16; + struct hifcoe_sqe_s sqe; + struct hifc_parent_sq_info_s *sq_info = NULL; + struct hifc_parent_queue_info_s *prnt_qinfo = NULL; + + UNF_CHECK_VALID(0x5015, UNF_TRUE, (v_pkg->type == UNF_PKG_BLS_REQ), + return UNF_RETURN_ERROR); + + memset(&sqe, 0, sizeof(struct hifcoe_sqe_s)); + + prnt_qinfo = hifc_find_parent_queue_info_by_pkg(v_hba, v_pkg); + if (!prnt_qinfo) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) send BLS SID_DID(0x%x_0x%x) with null parent queue information", + v_hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return ret; + } + + sq_info = hifc_find_parent_sq_by_pkg(v_hba, v_pkg); + if (!sq_info) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) send ABTS SID_DID(0x%x_0x%x) with null parent queue information", + v_hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return ret; + } + + rx_id = UNF_GET_RXID(v_pkg); + ox_id = UNF_GET_OXID(v_pkg) + v_hba->exit_base; + + /* Assemble the SQE Control Section part. + * The ABTS does not have Payload. bdsl=0 + */ + hifc_build_service_wqe_ctrl_section( + &sqe.ctrl_sl, + HIFC_BYTES_TO_QW_NUM(HIFC_SQE_TS_SIZE), 0); + + /* Assemble the SQE Task Section BLS Common part. 
The value of DW2 + * of BLS WQE is Rsvd, and the value of DW2 is 0 + */ + hifc_build_service_wqe_ts_common(&sqe.ts_sl, sq_info->rport_index, + ox_id, rx_id, 0); + + /* Assemble the special part of the ABTS */ + hifc_build_bls_wqe_ts_req(&sqe, v_pkg->frame_head.parameter, + UNF_GETXCHGALLOCTIME(v_pkg)); + + ret = hifc_parent_sq_enqueue(sq_info, &sqe); + + return ret; +} + +unsigned int hifc_send_bls_cmnd(void *v_hba, struct unf_frame_pkg_s *v_pkg) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct hifc_hba_s *hba = NULL; + unsigned long flag = 0; + struct hifc_parent_queue_info_s *prnt_qinfo = NULL; + + UNF_CHECK_VALID(0x4913, UNF_TRUE, v_hba, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x4914, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x4913, UNF_TRUE, UNF_PKG_BLS_REQ == v_pkg->type, + return UNF_RETURN_ERROR); + + HIFC_CHECK_PKG_ALLOCTIME(v_pkg); + hba = (struct hifc_hba_s *)v_hba; + + prnt_qinfo = hifc_find_parent_queue_info_by_pkg(hba, v_pkg); + if (!prnt_qinfo) { + HIFC_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) send BLS SID_DID(0x%x_0x%x) with null parent queue information", + hba->port_cfg.port_id, v_pkg->frame_head.csctl_sid, + v_pkg->frame_head.rctl_did); + + return ret; + } + + spin_lock_irqsave(&prnt_qinfo->parent_queue_state_lock, flag); + + if (HIFC_RPORT_OFFLOADED(prnt_qinfo)) { + spin_unlock_irqrestore(&prnt_qinfo->parent_queue_state_lock, + flag); + + /* INI: send ABTS_REQ via parent SQ */ + ret = hifc_send_bls_via_parent(hba, v_pkg); + + } else { + spin_unlock_irqrestore(&prnt_qinfo->parent_queue_state_lock, + flag); + + ret = hifc_send_bls_via_cmdq(hba, v_pkg); + } + + return ret; +} + +static unsigned int hifc_scq_rcv_flush_sq_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe) +{ + /* + * RCVD sq flush sts + * --->>> continue flush or clear done + */ + unsigned int ret = UNF_RETURN_ERROR; + + if (v_scqe->flush_sts.wd0.port_id != v_hba->port_index) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_EVENT, UNF_CRITICAL, + "[err]Port(0x%x) clear_sts_port_idx(0x%x) not match hba_port_idx(0x%x), stage(0x%x)", + v_hba->port_cfg.port_id, + v_scqe->clear_sts.wd0.port_id, + v_hba->port_index, + v_hba->q_set_stage); + + return UNF_RETURN_ERROR; + } + + if (v_scqe->flush_sts.wd0.last_flush) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EVENT, UNF_INFO, + "[info]Port(0x%x) flush sq(0x%x) done, stage(0x%x)", + v_hba->port_cfg.port_id, v_hba->next_clearing_sq, + v_hba->q_set_stage); + + /* If the Flush STS is last one, send cmd done */ + ret = hifc_clear_sq_wqe_done(v_hba); + } else { + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EVENT, UNF_MAJOR, + "[info]Port(0x%x) continue flush sq(0x%x), stage(0x%x)", + v_hba->port_cfg.port_id, v_hba->next_clearing_sq, + v_hba->q_set_stage); + + ret = hifc_clear_pending_sq_wqe(v_hba); + } + + return ret; +} + +static unsigned int hifc_scq_rcv_buf_clear_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe) +{ + /* + * clear: fetched sq wqe + * ---to--->>> pending sq wqe + */ + unsigned int ret = UNF_RETURN_ERROR; + + if (v_scqe->clear_sts.wd0.port_id != v_hba->port_index) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_EVENT, UNF_CRITICAL, + "[err]Port(0x%x) clear_sts_port_idx(0x%x) not match hba_port_idx(0x%x), stage(0x%x)", + v_hba->port_cfg.port_id, + v_scqe->clear_sts.wd0.port_id, + v_hba->port_index, + v_hba->q_set_stage); + + return UNF_RETURN_ERROR; + } + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EVENT, UNF_KEVENT, + "[info]Port(0x%x) cleared all fetched wqe, start clear sq pending 
wqe, stage (0x%x)",
+		   v_hba->port_cfg.port_id, v_hba->q_set_stage);
+
+	v_hba->q_set_stage = HIFC_QUEUE_SET_STAGE_FLUSHING;
+	ret = hifc_clear_pending_sq_wqe(v_hba);
+
+	return ret;
+}
+
+static unsigned int hifc_scq_rcv_sess_rst_sts(struct hifc_hba_s *v_hba,
+					      union hifcoe_scqe_u *v_scqe)
+{
+	unsigned int rport_index = INVALID_VALUE32;
+	unsigned long flag = 0;
+	struct hifc_parent_queue_info_s *parent_queue_info = NULL;
+	struct hifcoe_scqe_sess_sts_s *sess_sts =
+		(struct hifcoe_scqe_sess_sts_s *)(void *)v_scqe;
+	unsigned int ctx_flush_done;
+	unsigned int *ctx_dw = NULL;
+	int ret;
+
+	rport_index = sess_sts->wd1.conn_id;
+	if (rport_index >= UNF_HIFC_MAXRPORT_NUM) {
+		HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
+			   "[err]Port(0x%x) receive reset session cmd sts failed, invalid rport_index(0x%x) status_code(0x%x) remain_cnt(0x%x)",
+			   v_hba->port_cfg.port_id,
+			   rport_index,
+			   sess_sts->ch.wd0.err_code,
+			   sess_sts->ch.wd0.cqe_remain_cnt);
+
+		return UNF_RETURN_ERROR;
+	}
+
+	parent_queue_info =
+		&v_hba->parent_queue_mgr->parent_queues[rport_index];
+
+	/*
+	 * If only session reset is used, the offload status of sq remains
+	 * unchanged. If a link is deleted, the offload status is set to
+	 * destroying and is irreversible.
+	 */
+	spin_lock_irqsave(&parent_queue_info->parent_queue_state_lock, flag);
+
+	/*
+	 * For fault tolerance, cancel the connection-deletion timer even if
+	 * the deletion already timed out and this sts still arrived: a return
+	 * value of 1 indicates that the timer was cancelled successfully, and
+	 * 0 indicates that the timer is already being processed.
+	 */
+	if (!cancel_delayed_work(
+		&parent_queue_info->parent_sq_info.del_work)) {
+		HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
+			   "[info]Port(0x%x) rport_index(0x%x) delete rport timer maybe timeout",
+			   v_hba->port_cfg.port_id,
+			   rport_index);
+	}
+
+	/*
+	 * If the SessRstSts is returned too late and the Parent Queue Info
+	 * resource is released, OK is returned.
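+	 * The flush-done bit read below is the hardware's confirmation that
+	 * the session context write-back has finished; freeing the parent
+	 * queue before it is set would risk a use-after-free, so freeing is
+	 * retried from a delayed work instead.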
+ */ + if (parent_queue_info->offload_state != HIFC_QUEUE_STATE_DESTROYING) { + spin_unlock_irqrestore( + &parent_queue_info->parent_queue_state_lock, flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[info]Port(0x%x) reset session cmd complete, no need to free parent qinfo, rport_index(0x%x) status_code(0x%x) remain_cnt(0x%x)", + v_hba->port_cfg.port_id, + rport_index, + sess_sts->ch.wd0.err_code, + sess_sts->ch.wd0.cqe_remain_cnt); + + return RETURN_OK; + } + + if (parent_queue_info->parent_ctx.cqm_parent_ctx_obj) { + ctx_dw = (unsigned int *)((void *)(parent_queue_info->parent_ctx.cqm_parent_ctx_obj->vaddr)); + ctx_flush_done = ctx_dw[HIFC_CTXT_FLUSH_DONE_DW_POS] & + HIFC_CTXT_FLUSH_DONE_MASK_BE; + /* memory barr */ + mb(); + if (ctx_flush_done == 0) { + spin_unlock_irqrestore( + &parent_queue_info->parent_queue_state_lock, + flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) rport(0x%x) flushdone is not set, delay to free parent session", + v_hba->port_cfg.port_id, rport_index); + + /* If flushdone bit is not set,delay free Sq info */ + ret = queue_delayed_work( + v_hba->work_queue, + &parent_queue_info->parent_sq_info.flush_done_tmo_work, + (unsigned long) + msecs_to_jiffies((unsigned int) + HIFC_SQ_WAIT_FLUSH_DONE_TIMEOUT_MS)); + if (ret == (int)false) { + HIFC_HBA_STAT( + v_hba, + HIFC_STAT_PARENT_SQ_QUEUE_DELAYED_WORK); + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) rport(0x%x) queue delayed work failed iret:%d", + v_hba->port_cfg.port_id, + rport_index, ret); + } + + return RETURN_OK; + } + } + + spin_unlock_irqrestore(&parent_queue_info->parent_queue_state_lock, + flag); + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) begin to free parent session with rport_index(0x%x)", + v_hba->port_cfg.port_id, + rport_index); + + hifc_free_parent_queue_info(v_hba, parent_queue_info); + + return RETURN_OK; +} + +static unsigned int hifc_scq_rcv_clear_srq_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe) +{ + /* + * clear ELS/Immi SRQ + * ---then--->>> Destroy SRQ + */ + + struct hifc_hba_s *hba = v_hba; + struct hifc_srq_info_s *srq_info = NULL; + + if (HIFC_GET_SCQE_STATUS(v_scqe) != 0) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) clear srq failed, status(0x%x)", + v_hba->port_cfg.port_id, + HIFC_GET_SCQE_STATUS(v_scqe)); + + return RETURN_OK; + } + + srq_info = &hba->els_srq_info; + + /* + * 1: cancel timer succeed + * 0: the timer is being processed, the SQ is released when the timer + * times out + */ + if (cancel_delayed_work(&srq_info->del_work)) { + /* + * not free srq resource, it will be freed on hba remove + */ + srq_info->state = HIFC_CLEAN_DONE; + } + + return RETURN_OK; +} + +static unsigned int hifc_scq_rcv_marker_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int ox_id = INVALID_VALUE32; + unsigned int rx_id = INVALID_VALUE32; + struct unf_frame_pkg_s pkg = { 0 }; + struct hifcoe_scqe_itmf_marker_sts_s *marker_sts = NULL; + + marker_sts = &v_scqe->itmf_marker_sts; + ox_id = (unsigned int)marker_sts->wd1.ox_id; + ox_id = ox_id - v_hba->exit_base; + rx_id = (unsigned int)marker_sts->wd1.rx_id; + pkg.frame_head.oxid_rxid = rx_id | (unsigned int)(ox_id) << 16; + + pkg.frame_head.csctl_sid = marker_sts->wd3.sid; + pkg.frame_head.rctl_did = marker_sts->wd2.did; + + /* 1. 
set pkg status */ + if (unlikely(HIFC_SCQE_HAS_ERRCODE(v_scqe))) + pkg.status = UNF_IO_FAILED; + else + pkg.status = UNF_IO_SUCCESS; + + /* 2 .process rcvd marker STS: set exchange state */ + ret = hifc_rcv_tmf_marker_sts(v_hba, &pkg, ox_id); + + return ret; +} + +static unsigned int hifc_scq_rcv_abts_marker_sts(struct hifc_hba_s *v_hba, + union hifcoe_scqe_u *v_scqe) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int ox_id = INVALID_VALUE32; + unsigned int rx_id = INVALID_VALUE32; + struct unf_frame_pkg_s pkg = { 0 }; + + struct hifcoe_scqe_abts_marker_sts_s *abts_sts = NULL; + + abts_sts = &v_scqe->abts_marker_sts; + if (!abts_sts) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]ABTS marker STS is NULL"); + return ret; + } + + ox_id = (unsigned int)abts_sts->wd1.ox_id; + ox_id = ox_id - v_hba->exit_base; + rx_id = (unsigned int)abts_sts->wd1.rx_id; + pkg.frame_head.oxid_rxid = rx_id | (unsigned int)(ox_id) << 16; + pkg.frame_head.csctl_sid = abts_sts->wd3.sid; + pkg.frame_head.rctl_did = abts_sts->wd2.did; + /* abts marker abts_maker_status as ucode stat */ + pkg.abts_maker_status = (unsigned int)abts_sts->wd3.io_state; + + if (unlikely(HIFC_SCQE_HAS_ERRCODE(v_scqe))) + pkg.status = UNF_IO_FAILED; + else + pkg.status = UNF_IO_SUCCESS; + + ret = hifc_rcv_abts_marker_sts(v_hba, &pkg, ox_id); + + return ret; +} + +unsigned int hifc_handle_aeq_offload_err(struct hifc_hba_s *v_hba, + struct hifcoe_aqe_data_s *v_aeq_msg) +{ + unsigned int ret = RETURN_OK; + struct hifcoe_aqe_data_s *aeq_msg; + unsigned int rport_index = 0; + unsigned int local_ctx_id = 0; + struct hifc_parent_queue_info_s *prnt_qinfo = NULL; + struct hifc_destroy_ctrl_info_s destroy_sqe_info = { 0 }; + unsigned long flag = 0; + + aeq_msg = v_aeq_msg; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) receive off_load Err Event, EvtCode(0x%x) Conn_id(0x%x) Xid(0x%x)", + v_hba->port_cfg.port_id, aeq_msg->wd0.evt_code, + aeq_msg->wd0.conn_id, aeq_msg->wd1.xid); + + /* Currently, only the offload failure caused by insufficient scqe is + * processed. Other errors are not processed temporarily. 
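	 * Recovery model: the code below rolls offload_state back from
	 * OFFLOADING to INITIALIZED so that a later login can retry the
	 * offload, then pops any destroy-SQE that was parked while the
	 * offload was in flight.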
+ */ + if (unlikely(aeq_msg->wd0.evt_code != + FCOE_ERROR_OFFLOAD_LACKOF_SCQE_FAIL)) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) receive an unsupported error code of AEQ Event, EvtCode(0x%x) Conn_id(0x%x)", + v_hba->port_cfg.port_id, aeq_msg->wd0.evt_code, + aeq_msg->wd0.conn_id); + + return UNF_RETURN_ERROR; + } + HIFC_SCQ_ERR_TYPE_STAT(v_hba, FCOE_ERROR_OFFLOAD_LACKOF_SCQE_FAIL); + + rport_index = aeq_msg->wd0.conn_id; + local_ctx_id = aeq_msg->wd1.xid; + + if (rport_index >= UNF_HIFC_MAXRPORT_NUM) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) receive an error offload status: rport index(0x%x) is invalid, Xid(0x%x)", + v_hba->port_cfg.port_id, rport_index, + aeq_msg->wd1.xid); + + return UNF_RETURN_ERROR; + } + + prnt_qinfo = &v_hba->parent_queue_mgr->parent_queues[rport_index]; + if (hifc_check_rport_is_valid(prnt_qinfo, local_ctx_id) != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) receive an error offload status: rport index(0x%x), context id(0x%x) is invalid", + v_hba->port_cfg.port_id, rport_index, local_ctx_id); + + return UNF_RETURN_ERROR; + } + + spin_lock_irqsave(&prnt_qinfo->parent_queue_state_lock, flag); + + /* The offload status is restored only + * when the offload status is offloading + */ + if (prnt_qinfo->offload_state == HIFC_QUEUE_STATE_OFFLOADING) + prnt_qinfo->offload_state = HIFC_QUEUE_STATE_INITIALIZED; + + spin_unlock_irqrestore(&prnt_qinfo->parent_queue_state_lock, flag); + + if (prnt_qinfo->parent_sq_info.destroy_sqe.valid == UNF_TRUE) { + destroy_sqe_info.valid = + prnt_qinfo->parent_sq_info.destroy_sqe.valid; + destroy_sqe_info.rport_index = + prnt_qinfo->parent_sq_info.destroy_sqe.rport_index; + destroy_sqe_info.time_out = + prnt_qinfo->parent_sq_info.destroy_sqe.time_out; + destroy_sqe_info.start_jiff = + prnt_qinfo->parent_sq_info.destroy_sqe.start_jiff; + + destroy_sqe_info.rport_info.nport_id = + prnt_qinfo->parent_sq_info.destroy_sqe.rport_info.nport_id; + + destroy_sqe_info.rport_info.rport_index = + prnt_qinfo->parent_sq_info.destroy_sqe.rport_info.rport_index; + + destroy_sqe_info.rport_info.port_name = + prnt_qinfo->parent_sq_info.destroy_sqe.rport_info.port_name; + + prnt_qinfo->parent_sq_info.destroy_sqe.valid = UNF_FALSE; + + HIFC_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) pop up delay destroy parent sq, sqe start time 0x%llx, timeout value 0x%x, rport index 0x%x, offload state 0x%x", + v_hba->port_cfg.port_id, + destroy_sqe_info.start_jiff, + destroy_sqe_info.time_out, + prnt_qinfo->parent_sq_info.destroy_sqe.rport_info.rport_index, + HIFC_QUEUE_STATE_INITIALIZED); + + ret = hifc_free_parent_resource(v_hba, + &destroy_sqe_info.rport_info); + if (ret != RETURN_OK) { + HIFC_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) pop delay destroy parent sq failed, rport index 0x%x, rport nport id 0x%x", + v_hba->port_cfg.port_id, + destroy_sqe_info.rport_info.rport_index, + destroy_sqe_info.rport_info.nport_id); + } + } + + return ret; +} diff --git a/drivers/scsi/huawei/hifc/hifc_service.h b/drivers/scsi/huawei/hifc/hifc_service.h new file mode 100644 index 000000000000..c810cc7e64cb --- /dev/null +++ b/drivers/scsi/huawei/hifc/hifc_service.h @@ -0,0 +1,248 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#ifndef __HIFC_SERVICE_H__ +#define __HIFC_SERVICE_H__ + +/* 
Send ElsCmnd or ElsRsp */ +unsigned int hifc_send_els_cmnd(void *phba, struct unf_frame_pkg_s *v_pkg); + +/* Send GsCmnd */ +unsigned int hifc_send_gs_cmnd(void *v_hba, struct unf_frame_pkg_s *v_pkg); + +/* Send BlsCmnd */ +unsigned int hifc_send_bls_cmnd(void *v_hba, struct unf_frame_pkg_s *v_pkg); + +/* Receive Frame from Root RQ */ +unsigned int hifc_rcv_service_frame_from_rq( + struct hifc_hba_s *v_hba, + struct hifc_root_rq_info_s *rq_info, + struct hifc_root_rq_complet_info_s *v_complet_info, + unsigned short v_rcv_buf_num); + +unsigned int hifc_rq_rcv_srv_err(struct hifc_hba_s *v_hba, + struct hifc_root_rq_complet_info_s *v_info); + +unsigned int hifc_rq_rcv_els_rsp_sts( + struct hifc_hba_s *v_hba, + struct hifc_root_rq_complet_info_s *v_info); + +/* Receive Frame from SCQ */ +unsigned int hifc_rcv_scqe_entry_from_scq(void *v_hba, void *v_scqe, + unsigned int scq_idx); + +/* FC txmfs */ +#define HIFC_DEFAULT_TX_MAX_FREAM_SIZE 256 + +#define HIFC_FIRST_PKG_FLAG (1 << 0) +#define HIFC_LAST_PKG_FLAG (1 << 1) + +#define HIFC_CHECK_IF_FIRST_PKG(pkg_flag) ((pkg_flag) & HIFC_FIRST_PKG_FLAG) +#define HIFC_CHECK_IF_LAST_PKG(pkg_flag) ((pkg_flag) & HIFC_LAST_PKG_FLAG) + +#define HIFC_GET_SERVICE_TYPE(v_hba) 12 +#define HIFC_GET_PACKET_TYPE(v_service_type) 1 +#define HIFC_GET_PACKET_COS(v_service_type) 1 +#define HIFC_GET_PRLI_PAYLOAD_LEN \ + (UNF_PRLI_PAYLOAD_LEN - UNF_PRLI_SIRT_EXTRA_SIZE) +/* Start addr of the header/payloed of the cmnd buffer in the pkg */ +#define HIFC_FC_HEAD_LEN (sizeof(struct unf_fchead_s)) +#define HIFC_PAYLOAD_OFFSET (sizeof(struct unf_fchead_s)) +#define HIFC_GET_CMND_PAYLOAD_ADDR(v_pkg) \ + UNF_GET_FLOGI_PAYLOAD(v_pkg) +#define HIFC_GET_CMND_HEADER_ADDR(v_pkg) \ + ((v_pkg)->unf_cmnd_pload_bl.buffer_ptr) +#define HIFC_GET_RSP_HEADER_ADDR(v_pkg) \ + ((v_pkg)->unf_rsp_pload_bl.buffer_ptr) +#define HIFC_GET_RSP_PAYLOAD_ADDR(v_pkg) \ + ((v_pkg)->unf_rsp_pload_bl.buffer_ptr + HIFC_PAYLOAD_OFFSET) +#define HIFC_GET_CMND_FC_HEADER(v_pkg) \ + (&(UNF_GET_SFS_ENTRY(v_pkg)->sfs_common.frame_head)) +#define HIFC_PKG_IS_ELS_RSP(els_cmnd_type) \ + (((els_cmnd_type) == ELS_ACC) || ((els_cmnd_type) == ELS_RJT)) +#define HIFC_XID_IS_VALID(xid, exi_base, exi_count) \ + (((xid) >= (exi_base)) && ((xid) < ((exi_base) + (exi_count)))) + +#define UNF_FC_PAYLOAD_ELS_MASK 0xFF000000 +#define UNF_FC_PAYLOAD_ELS_SHIFT 24 +#define UNF_FC_PAYLOAD_ELS_DWORD 0 + +/* Note: this pfcpayload is little endian */ +#define UNF_GET_FC_PAYLOAD_ELS_CMND(pfcpayload) \ + UNF_GET_SHIFTMASK(((unsigned int *)(void *)pfcpayload)\ + [UNF_FC_PAYLOAD_ELS_DWORD], \ + UNF_FC_PAYLOAD_ELS_SHIFT, UNF_FC_PAYLOAD_ELS_MASK) + +#define HIFC_ELS_CMND_MASK 0xffff +#define HIFC_ELS_CMND__RELEVANT_SHIFT 16UL +#define HIFC_GET_ELS_CMND_CODE(__cmnd) \ + ((unsigned short)((__cmnd) & HIFC_ELS_CMND_MASK)) +#define HIFC_GET_ELS_RSP_TYPE(__cmnd) \ + ((unsigned short)((__cmnd) & HIFC_ELS_CMND_MASK)) +#define HIFC_GET_ELS_RSP_CODE(__cmnd) \ + ((unsigned short)((__cmnd) >> HIFC_ELS_CMND__RELEVANT_SHIFT & \ + HIFC_ELS_CMND_MASK)) +#define HIFC_GET_GS_CMND_CODE(__cmnd) \ + ((unsigned short)((__cmnd) & HIFC_ELS_CMND_MASK)) + +/* ELS CMND Request */ +#define ELS_CMND 0 + +/* fh_f_ctl - Frame control flags. 
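 * For example, HIFC_FCTL_REQ below is FIRST_SEQ (1 << 21) | END_SEQ (1 << 19)
 * | SEQ_INIT (1 << 16) = 0x290000, the usual F_CTL for a single-sequence
 * request; HIFC_FCTL_RESP is EX_CTX (1 << 23) | LAST_SEQ (1 << 20) | END_SEQ
 * | SEQ_INIT = 0x990000.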
*/ +#define HIFC_FC_EX_CTX (1 << 23) /* sent by responder to exchange */ +#define HIFC_FC_SEQ_CTX (1 << 22) /* sent by responder to sequence */ +#define HIFC_FC_FIRST_SEQ (1 << 21) /* first sequence of this exchange */ +#define HIFC_FC_LAST_SEQ (1 << 20) /* last sequence of this exchange */ +#define HIFC_FC_END_SEQ (1 << 19) /* last frame of sequence */ +#define HIFC_FC_END_CONN (1 << 18) /* end of class 1 connection pending */ +#define HIFC_FC_RES_B17 (1 << 17) /* reserved */ +#define HIFC_FC_SEQ_INIT (1 << 16) /* transfer of sequence initiative */ +#define HIFC_FC_X_ID_REASS (1 << 15) /* exchange ID has been changed */ +#define HIFC_FC_X_ID_INVAL (1 << 14) /* exchange ID invalidated */ +#define HIFC_FC_ACK_1 (1 << 12) /* 13:12 = 1: ACK_1 expected */ +#define HIFC_FC_ACK_N (2 << 12) /* 13:12 = 2: ACK_N expected */ +#define HIFC_FC_ACK_0 (3 << 12) /* 13:12 = 3: ACK_0 expected */ +#define HIFC_FC_RES_B11 (1 << 11) /* reserved */ +#define HIFC_FC_RES_B10 (1 << 10) /* reserved */ +#define HIFC_FC_RETX_SEQ (1 << 9) /* retransmitted sequence */ +#define HIFC_FC_UNI_TX (1 << 8) /* unidirectional transmit (class 1) */ +#define HIFC_FC_CONT_SEQ(i) ((i) << 6) +#define HIFC_FC_ABT_SEQ(i) ((i) << 4) +#define HIFC_FC_REL_OFF (1 << 3) /* parameter is relative offset */ +#define HIFC_FC_RES2 (1 << 2) /* reserved */ +#define HIFC_FC_FILL(i) ((i) & 3) /* 1:0: bytes of trailing fill */ + +#define HIFC_FCTL_REQ (HIFC_FC_FIRST_SEQ | HIFC_FC_END_SEQ |\ + HIFC_FC_SEQ_INIT) +#define HIFC_FCTL_RESP (HIFC_FC_EX_CTX | HIFC_FC_LAST_SEQ | \ + HIFC_FC_END_SEQ | HIFC_FC_SEQ_INIT) +#define HIFC_RCTL_BLS_REQ 0x81 +#define HIFC_RCTL_BLS_ACC 0x84 +#define HIFC_RCTL_BLS_RJT 0x85 + +#define UNF_IO_STATE_NEW 0 +#define TGT_IO_STATE_SEND_XFERRDY (1 << 2) +#define TGT_IO_STATE_RSP (1 << 5) +#define TGT_IO_STATE_ABORT (1 << 7) + +enum HIFC_FC_FH_TYPE_E { + HIFC_FC_TYPE_BLS = 0x00, /* basic link service */ + HIFC_FC_TYPE_ELS = 0x01, /* extended link service */ + HIFC_FC_TYPE_IP = 0x05, /* IP over FC, RFC 4338 */ + HIFC_FC_TYPE_FCP = 0x08, /* SCSI FCP */ + HIFC_FC_TYPE_CT = 0x20, /* Fibre Channel Services (FC-CT) */ + HIFC_FC_TYPE_ILS = 0x22 /* internal link service */ +}; + +enum HIFC_FC_FH_RCTL_E { + HIFC_FC_RCTL_DD_UNCAT = 0x00, /* uncategorized information */ + HIFC_FC_RCTL_DD_SOL_DATA = 0x01, /* solicited data */ + HIFC_FC_RCTL_DD_UNSOL_CTL = 0x02, /* unsolicited control */ + HIFC_FC_RCTL_DD_SOL_CTL = 0x03, /* solicited control or reply */ + HIFC_FC_RCTL_DD_UNSOL_DATA = 0x04, /* unsolicited data */ + HIFC_FC_RCTL_DD_DATA_DESC = 0x05, /* data descriptor */ + HIFC_FC_RCTL_DD_UNSOL_CMD = 0x06, /* unsolicited command */ + HIFC_FC_RCTL_DD_CMD_STATUS = 0x07, /* command status */ + +#define HIFC_FC_RCTL_ILS_REQ HIFC_FC_RCTL_DD_UNSOL_CTL /* ILS request */ +#define HIFC_FC_RCTL_ILS_REP HIFC_FC_RCTL_DD_SOL_CTL /* ILS reply */ + + /* + * Extended Link_Data + */ + HIFC_FC_RCTL_ELS_REQ = 0x22, /* extended link services request */ + HIFC_FC_RCTL_ELS_RSP = 0x23, /* extended link services reply */ + HIFC_FC_RCTL_ELS4_REQ = 0x32, /* FC-4 ELS request */ + HIFC_FC_RCTL_ELS4_RSP = 0x33, /* FC-4 ELS reply */ + /* + * Optional Extended Headers + */ + HIFC_FC_RCTL_VFTH = 0x50, /* virtual fabric tagging header */ + HIFC_FC_RCTL_IFRH = 0x51, /* inter-fabric routing header */ + HIFC_FC_RCTL_ENCH = 0x52, /* encapsulation header */ + /* + * Basic Link Services fh_r_ctl values. 
+ */ + HIFC_FC_RCTL_BA_NOP = 0x80, /* basic link service NOP */ + HIFC_FC_RCTL_BA_ABTS = 0x81, /* basic link service abort */ + HIFC_FC_RCTL_BA_RMC = 0x82, /* remove connection */ + HIFC_FC_RCTL_BA_ACC = 0x84, /* basic accept */ + HIFC_FC_RCTL_BA_RJT = 0x85, /* basic reject */ + HIFC_FC_RCTL_BA_PRMT = 0x86, /* dedicated connection preempted */ + /* + * Link Control Information. + */ + HIFC_FC_RCTL_ACK_1 = 0xc0, /* acknowledge_1 */ + HIFC_FC_RCTL_ACK_0 = 0xc1, /* acknowledge_0 */ + HIFC_FC_RCTL_P_RJT = 0xc2, /* port reject */ + HIFC_FC_RCTL_F_RJT = 0xc3, /* fabric reject */ + HIFC_FC_RCTL_P_BSY = 0xc4, /* port busy */ + HIFC_FC_RCTL_F_BSY = 0xc5, /* fabric busy to data frame */ + HIFC_FC_RCTL_F_BSYL = 0xc6, /* fabric busy to link control frame */ + HIFC_FC_RCTL_LCR = 0xc7, /* link credit reset */ + HIFC_FC_RCTL_END = 0xc9 /* end */ +}; + +struct hifc_fc_frame_header { + unsigned char rctl; /* routing control */ + unsigned char did[3]; /* Destination ID */ + + unsigned char cs_ctl; /* class of service control / pri */ + unsigned char sid[3]; /* Source ID */ + + unsigned char type; /* see enum fc_fh_type below */ + unsigned char frame_ctl[3]; /* frame control */ + + unsigned char seq_id; /* sequence ID */ + unsigned char df_ctl; /* data field control */ + unsigned short seq_cnt; /* sequence count */ + + unsigned short ox_id; /* originator exchange ID */ + unsigned short rx_id; /* responder exchange ID */ + unsigned int parm_offset; /* parameter or relative offset */ +}; + +unsigned int hifc_rcv_els_cmnd(const struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned char *v_pld, + unsigned int pld_len, + int first_frame); +unsigned int hifc_rcv_els_rsp(const struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned int ox_id); +unsigned int hifc_rcv_els_rsp_sts(const struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned int rx_id); +unsigned int hifc_rcv_gs_rsp(const struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned int ox_id); +unsigned int hifc_rcv_bls_rsp(const struct hifc_hba_s *v_hba, + struct unf_frame_pkg_s *v_pkg, + unsigned int ox_id); + +void hifc_save_login_para_in_sq_info( + struct hifc_hba_s *v_hba, + struct unf_port_login_parms_s *v_login_coparms); +unsigned int hifc_handle_aeq_offload_err(struct hifc_hba_s *v_hba, + struct hifcoe_aqe_data_s *v_aeg_msg); + +#define HIFC_CHECK_PKG_ALLOCTIME(v_pkg) \ + do { \ + if (unlikely(UNF_GETXCHGALLOCTIME(v_pkg) == 0)) { \ + HIFC_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, \ + UNF_WARN, \ + "[warn]Invalid MagicNum,S_ID(0x%x) D_ID(0x%x) OXID(0x%x) RX_ID(0x%x) pkg type(0x%x) hot pooltag(0x%x)", \ + UNF_GET_SID(v_pkg), \ + UNF_GET_DID(v_pkg), \ + UNF_GET_OXID(v_pkg), \ + UNF_GET_RXID(v_pkg), \ + ((struct unf_frame_pkg_s *)v_pkg)->type, \ + UNF_GET_XCHG_TAG(v_pkg)); \ + } \ + } while (0) + +#endif + diff --git a/drivers/scsi/huawei/hifc/unf_disc.c b/drivers/scsi/huawei/hifc/unf_disc.c new file mode 100644 index 000000000000..12d8514af959 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_disc.c @@ -0,0 +1,1320 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#include "unf_log.h" +#include "unf_common.h" +#include "unf_disc.h" +#include "unf_event.h" +#include "unf_lport.h" +#include "unf_rport.h" +#include "unf_exchg.h" +#include "unf_service.h" +#include "unf_portman.h" + +#define UNF_LIST_RSCN_PAGE_CNT 2560 +#define UNF_MAX_PORTS_PRI_LOOP 2 +#define UNF_MAX_GS_SEND_NUM 8 +#define 
UNF_OS_REMOVE_CARD_TIMEOUT (60 * 1000) + +static void unf_set_disc_state(struct unf_disc_s *v_disc, + enum unf_disc_state_e v_en_states) +{ + UNF_CHECK_VALID(0x651, UNF_TRUE, v_disc, return); + + if (v_en_states != v_disc->en_states) { + /* Reset disc retry count */ + v_disc->retry_count = 0; + } + + v_disc->en_states = v_en_states; +} + +static inline unsigned int unf_get_loop_map(struct unf_lport_s *v_lport, + unsigned char v_loop_map[], + unsigned int loop_map_size) +{ + struct unf_buf_s buf = { 0 }; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID( + 0x652, UNF_TRUE, + v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get, + return UNF_RETURN_ERROR); + + buf.cbuf = v_loop_map; + buf.buf_len = loop_map_size; + + ret = v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_get( + v_lport->fc_port, + UNF_PORT_CFG_GET_LOOP_MAP, + (void *)&buf); + return ret; +} + +static int unf_discover_private_loop(void *v_arg_in, void *v_arg_out) +{ + struct unf_lport_s *lport = (struct unf_lport_s *)v_arg_in; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int i = 0; + unsigned char loop_id = 0; + unsigned int alpa_index = 0; + unsigned char loop_map[UNF_LOOPMAP_COUNT]; + + UNF_REFERNCE_VAR(v_arg_out); + UNF_CHECK_VALID(0x653, UNF_TRUE, lport, return UNF_RETURN_ERROR); + memset(loop_map, 0x0, UNF_LOOPMAP_COUNT); + + /* Get Port Loop Map */ + ret = unf_get_loop_map(lport, loop_map, UNF_LOOPMAP_COUNT); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) get loop map failed", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + /* Check Loop Map Ports Count */ + if (loop_map[0] > UNF_MAX_PORTS_PRI_LOOP) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) has more than %d ports(%u) in private loop", + lport->port_id, UNF_MAX_PORTS_PRI_LOOP, + loop_map[0]); + + return UNF_RETURN_ERROR; + } + + /* AL_PA = 0 means Public Loop */ + if ((loop_map[1] == UNF_FL_PORT_LOOP_ADDR) || + (loop_map[2] == UNF_FL_PORT_LOOP_ADDR)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) one or more AL_PA is 0x00, indicate it's FL_Port", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + /* Discovery Private Loop Ports */ + for (i = 0; i < loop_map[0]; i++) { + alpa_index = i + 1; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) start to disc(0x%x) with count(0x%x)", + lport->port_id, loop_map[alpa_index], i); + + /* Check whether need delay to send PLOGI or not */ + loop_id = loop_map[alpa_index]; + unf_login_with_loop_node(lport, (unsigned int)loop_id); + } + + return RETURN_OK; +} + +static unsigned int unf_disc_start(void *v_lport) +{ + /* + * Call by: + * 1. Enter Private Loop Login + * 2. Analysis RSCN payload + * 3. 
SCR callback + ** + * Doing: + * Fabric/Public Loop: Send GID_PT + * Private Loop: (delay to) send PLOGI or send LOGO immediately + * P2P: do nothing + */ + struct unf_lport_s *lport = (struct unf_lport_s *)v_lport; + struct unf_rport_s *rport = NULL; + struct unf_disc_s *disc = NULL; + struct unf_cm_event_report *event = NULL; + unsigned int ret = RETURN_OK; + unsigned long flag = 0; + enum unf_act_topo_e act_topo = UNF_ACT_TOP_UNKNOWN; + + UNF_CHECK_VALID(0x654, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + act_topo = lport->en_act_topo; + disc = &lport->disc; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]LOGIN: Port(0x%x) with topo(0x%x) begin to discovery", + lport->port_id, act_topo); + + if ((act_topo == UNF_ACT_TOP_P2P_FABRIC) || + (act_topo == UNF_ACT_TOP_PUBLIC_LOOP)) { + /* 1. Fabric or Public Loop Topology: for directory server */ + /* 0xfffffc */ + rport = unf_get_rport_by_nport_id(lport, + UNF_FC_FID_DIR_SERV); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) unable to get SNS RPort(0xfffffc)", + lport->port_id); + + rport = unf_rport_get_free_and_init( + lport, + UNF_PORT_TYPE_FC, + UNF_FC_FID_DIR_SERV); + if (!rport) + return UNF_RETURN_ERROR; + rport->nport_id = UNF_FC_FID_DIR_SERV; + } + + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + unf_set_disc_state(disc, UNF_DISC_ST_START); /* disc start */ + unf_disc_state_ma(lport, UNF_EVENT_DISC_NORMAL_ENTER); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + /* + * NOTE: Send GID_PT + * The Name Server shall, when it receives a GID_PT request, + * return all Port Identifiers having registered support for + * the specified Port Type. + * One or more Port Identifiers, having registered as + * the specified Port Type, are returned. 
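+			 * For example, a GID_PT with Port Type 0x7F (Nx_Port)
+			 * may be answered with an ACC whose payload lists
+			 * D_IDs such as 0x010001 and 0x010002; each returned
+			 * id is then resolved further via GNN_ID/GPN_ID.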
+			 */
+			ret = unf_send_gid_pt(lport, rport);
+			if (ret != RETURN_OK)
+				unf_disc_error_recovery(lport);
+	} else if (act_topo == UNF_ACT_TOP_PRIVATE_LOOP) {
+		/* Private Loop: to thread process */
+		event = unf_get_one_event_node(lport);
+		UNF_CHECK_VALID(0x655, UNF_TRUE, NULL != event,
+				return UNF_RETURN_ERROR);
+
+		event->lport = lport;
+		event->event_asy_flag = UNF_EVENT_ASYN;
+		event->pfn_unf_event_task = unf_discover_private_loop;
+		event->para_in = (void *)lport;
+
+		unf_post_one_event_node(lport, event);
+	} else {
+		/* P2P topology mode: Do nothing */
+		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT,
+			  UNF_MAJOR,
+			  "[info]Port(0x%x) with topo(0x%x) has nothing to do",
+			  lport->port_id, act_topo);
+	}
+
+	return ret;
+}
+
+static unsigned int unf_disc_stop(void *v_lport)
+{
+	/* Called by the GID_ACC processor */
+	struct unf_lport_s *lport = NULL;
+	struct unf_lport_s *root_lport = NULL;
+	struct unf_rport_s *sns_port = NULL;
+	struct unf_disc_rport_s *disc_rport = NULL;
+	struct unf_disc_s *disc = NULL;
+	struct unf_disc_s *root_disc = NULL;
+	struct list_head *node = NULL;
+	unsigned long flag = 0;
+	unsigned int ret = RETURN_OK;
+	unsigned int nport_id = 0;
+
+	UNF_CHECK_VALID(0x656, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
+
+	lport = (struct unf_lport_s *)v_lport;
+	disc = &lport->disc;
+	root_lport = (struct unf_lport_s *)lport->root_lport;
+	root_disc = &root_lport->disc;
+
+	/* Get R_Port for Directory server */
+	/* 0xfffffc */
+	sns_port = unf_get_rport_by_nport_id(lport, UNF_FC_FID_DIR_SERV);
+	if (!sns_port) {
+		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
+			  "[warn]Port(0x%x) find fabric RPort(0xfffffc) failed",
+			  lport->port_id);
+
+		return UNF_RETURN_ERROR;
+	}
+
+	/* for R_Port from disc pool busy list */
+	spin_lock_irqsave(&disc->rport_busy_pool_lock, flag);
+	if (list_empty(&disc->disc_rport_mgr.list_disc_rport_busy)) {
+		spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag);
+
+		/* Empty and return directly */
+		return RETURN_OK;
+	}
+
+	node = (&disc->disc_rport_mgr.list_disc_rport_busy)->next;
+	do {
+		/* Delete from Disc busy list */
+		disc_rport = list_entry(node, struct unf_disc_rport_s,
+					entry_rport);
+		nport_id = disc_rport->nport_id;
+		list_del_init(node);
+		spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag);
+
+		/* Add back to (free) Disc R_Port pool (list) */
+		spin_lock_irqsave(&root_disc->rport_busy_pool_lock, flag);
+		list_add_tail(node,
+			      &root_disc->disc_rport_mgr.list_disc_rports_pool);
+		spin_unlock_irqrestore(&root_disc->rport_busy_pool_lock, flag);
+
+		/* Send GNN_ID to Name Server */
+		ret = unf_get_and_post_disc_event(lport, sns_port, nport_id,
+						  UNF_DISC_GET_NODE_NAME);
+		if (ret != RETURN_OK) {
+			UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT,
+				  UNF_ERR,
+				  "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)",
+				  lport->nport_id, UNF_DISC_GET_NODE_NAME,
+				  nport_id);
+
+			/* NOTE: go to next stage */
+			unf_rcv_gnn_id_rsp_unknown(lport, sns_port,
+						   nport_id);
+		}
+
+		spin_lock_irqsave(&disc->rport_busy_pool_lock, flag);
+		node = (&disc->disc_rport_mgr.list_disc_rport_busy)->next;
+
+	} while (node != &disc->disc_rport_mgr.list_disc_rport_busy);
+	spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag);
+
+	return ret;
+}
+
+static void unf_disc_callback(void *v_lport, unsigned int v_result)
+{
+	/* Do nothing */
+	UNF_REFERNCE_VAR(v_lport);
+	UNF_REFERNCE_VAR(v_result);
+}
+
+/*
+ * Function Name       : unf_init_rport_pool
+ * Function Description: Init R_Port (free) Pool
+ * Input Parameters    : struct
unf_lport_s *v_lport + * Output Parameters : N/A + * Return Type : unsigned int + */ +static unsigned int unf_init_rport_pool(struct unf_lport_s *v_lport) +{ + struct unf_rport_pool_s *rport_pool = NULL; + struct unf_rport_s *rport = NULL; + unsigned int ret = RETURN_OK; + unsigned int i = 0; + unsigned int bit_map_cnt = 0; + unsigned long flag = 0; + unsigned int max_login = 0; + + UNF_CHECK_VALID(0x657, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + /* Init RPort Pool info */ + rport_pool = &v_lport->rport_pool; + max_login = v_lport->low_level_func.lport_cfg_items.max_login; + rport_pool->rport_pool_completion = NULL; + rport_pool->rport_pool_count = max_login; + spin_lock_init(&rport_pool->rport_free_pool_lock); + INIT_LIST_HEAD(&rport_pool->list_rports_pool); /* free RPort pool */ + + /* 1. Alloc RPort Pool buffer/resource (memory) */ + rport_pool->rport_pool_add = + vmalloc((size_t)(max_login * sizeof(struct unf_rport_s))); + if (!rport_pool->rport_pool_add) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) allocate RPort(s) resource failed", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + memset(rport_pool->rport_pool_add, 0, + (max_login * sizeof(struct unf_rport_s))); + + /* 2. Alloc R_Port Pool bitmap */ + bit_map_cnt = (v_lport->low_level_func.support_max_rport) / + BITS_PER_LONG + 1; + rport_pool->pul_rpi_bitmap = vmalloc((size_t)(bit_map_cnt * + sizeof(unsigned long))); + if (!rport_pool->pul_rpi_bitmap) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) allocate RPort Bitmap failed", + v_lport->port_id); + + vfree(rport_pool->rport_pool_add); + rport_pool->rport_pool_add = NULL; + return UNF_RETURN_ERROR; + } + memset(rport_pool->pul_rpi_bitmap, 0, + (bit_map_cnt * sizeof(unsigned long))); + + /* 3. 
Rport resource Management: Add Rports (buffer)
+ * to Rport Pool List
+ */
+ rport = (struct unf_rport_s *)(rport_pool->rport_pool_add);
+ spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag);
+ for (i = 0; i < rport_pool->rport_pool_count; i++) {
+ spin_lock_init(&rport->rport_state_lock);
+ list_add_tail(&rport->entry_rport,
+ &rport_pool->list_rports_pool);
+ sema_init(&rport->task_sema, 0);
+ rport++;
+ }
+ spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flag);
+
+ return ret;
+}
+
+static void unf_free_rport_pool(struct unf_lport_s *v_lport)
+{
+ struct unf_rport_pool_s *rport_pool = NULL;
+ int wait = UNF_FALSE;
+ unsigned long flag = 0;
+ unsigned int remain = 0;
+ unsigned long long time_out = 0;
+ unsigned int max_login = 0;
+
+ struct completion rport_pool_completion =
+ COMPLETION_INITIALIZER(rport_pool_completion);
+
+ UNF_CHECK_VALID(0x671, UNF_TRUE, v_lport, return);
+ UNF_REFERNCE_VAR(remain);
+
+ rport_pool = &v_lport->rport_pool;
+ max_login = v_lport->low_level_func.lport_cfg_items.max_login;
+
+ spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag);
+ if (max_login != rport_pool->rport_pool_count) {
+ rport_pool->rport_pool_completion = &rport_pool_completion;
+ remain = max_login - rport_pool->rport_pool_count;
+ wait = UNF_TRUE;
+ }
+ spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flag);
+
+ if (wait == UNF_TRUE) {
+ UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
+ "[info]Port(0x%x) begin to wait for RPort pool completion(%ld), remain(0x%x)",
+ v_lport->port_id, jiffies, remain);
+
+ time_out = wait_for_completion_timeout(
+ rport_pool->rport_pool_completion,
+ msecs_to_jiffies(UNF_OS_REMOVE_CARD_TIMEOUT));
+ if (time_out == 0)
+ unf_cmmark_dirty_mem(
+ v_lport,
+ UNF_LPORT_DIRTY_FLAG_RPORT_POOL_DIRTY);
+
+ UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
+ "[info]Port(0x%x) wait for RPort pool completion end(%ld)",
+ v_lport->port_id, jiffies);
+
+ spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag);
+ rport_pool->rport_pool_completion = NULL;
+ spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flag);
+ }
+
+ if ((v_lport->dirty_flag &
+ UNF_LPORT_DIRTY_FLAG_RPORT_POOL_DIRTY) == 0) {
+ vfree(rport_pool->rport_pool_add);
+ rport_pool->rport_pool_add = NULL; /* R_Port pool */
+ vfree(rport_pool->pul_rpi_bitmap); /* R_Port bitmap */
+ rport_pool->pul_rpi_bitmap = NULL;
+ }
+ UNF_REFERNCE_VAR(remain);
+}
+
+static void unf_init_rscn_node(struct unf_port_id_page_s *v_port_id_page)
+{
+ UNF_CHECK_VALID(0x658, UNF_TRUE, v_port_id_page, return);
+
+ v_port_id_page->uc_addr_format = 0;
+ v_port_id_page->uc_event_qualifier = 0;
+ v_port_id_page->uc_reserved = 0;
+ v_port_id_page->port_id_area = 0;
+ v_port_id_page->port_id_domain = 0;
+ v_port_id_page->port_id_port = 0;
+}
+
+struct unf_port_id_page_s *unf_get_free_rscn_node(void *v_rscn_mg)
+{
+ /* Called by the RSCN Port_ID save path */
+ struct unf_rscn_mg_s *rscn_mgr = NULL;
+ struct unf_port_id_page_s *port_id_node = NULL;
+ struct list_head *list_node = NULL;
+ unsigned long flag = 0;
+
+ UNF_CHECK_VALID(0x659, UNF_TRUE, v_rscn_mg, return NULL);
+ rscn_mgr = (struct unf_rscn_mg_s *)v_rscn_mg;
+
+ spin_lock_irqsave(&rscn_mgr->rscn_id_list_lock, flag);
+ if (list_empty(&rscn_mgr->list_free_rscn_page)) {
+ UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EQUIP_ATT,
+
UNF_WARN, + "[warn]No RSCN node anymore"); + + spin_unlock_irqrestore(&rscn_mgr->rscn_id_list_lock, flag); + return NULL; + } + + /* Get from list_free_RSCN_page */ + list_node = (&rscn_mgr->list_free_rscn_page)->next; + list_del(list_node); + rscn_mgr->free_rscn_count--; + port_id_node = list_entry(list_node, struct unf_port_id_page_s, + list_node_rscn); + unf_init_rscn_node(port_id_node); + spin_unlock_irqrestore(&rscn_mgr->rscn_id_list_lock, flag); + + return port_id_node; +} + +static void unf_release_rscn_node(void *v_rscn_mg, + void *v_port_id_node) +{ + /* Call by RSCN GID_ACC */ + struct unf_rscn_mg_s *rscn_mgr = NULL; + struct unf_port_id_page_s *port_id_node = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x660, UNF_TRUE, v_rscn_mg, return); + UNF_CHECK_VALID(0x661, UNF_TRUE, v_port_id_node, return); + rscn_mgr = (struct unf_rscn_mg_s *)v_rscn_mg; + port_id_node = (struct unf_port_id_page_s *)v_port_id_node; + + /* Back to list_free_RSCN_page */ + spin_lock_irqsave(&rscn_mgr->rscn_id_list_lock, flag); + rscn_mgr->free_rscn_count++; + unf_init_rscn_node(port_id_node); + list_add_tail(&port_id_node->list_node_rscn, + &rscn_mgr->list_free_rscn_page); + spin_unlock_irqrestore(&rscn_mgr->rscn_id_list_lock, flag); +} + +static unsigned int unf_init_rscn_pool(struct unf_lport_s *v_lport) +{ + struct unf_rscn_mg_s *rscn_mgr = NULL; + struct unf_port_id_page_s *port_id_page = NULL; + unsigned int ret = RETURN_OK; + unsigned int i = 0; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x662, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + rscn_mgr = &v_lport->disc.rscn_mgr; + + /* Get RSCN Pool buffer */ + rscn_mgr->rscn_pool_add = + vmalloc(UNF_LIST_RSCN_PAGE_CNT * + sizeof(struct unf_port_id_page_s)); + if (!rscn_mgr->rscn_pool_add) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Port(0x%x) allocate RSCN pool failed", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + memset(rscn_mgr->rscn_pool_add, 0, + sizeof(struct unf_port_id_page_s) * UNF_LIST_RSCN_PAGE_CNT); + + spin_lock_irqsave(&rscn_mgr->rscn_id_list_lock, flag); + port_id_page = (struct unf_port_id_page_s *)(rscn_mgr->rscn_pool_add); + for (i = 0; i < UNF_LIST_RSCN_PAGE_CNT; i++) { + /* Add tail to list_free_RSCN_page */ + list_add_tail(&port_id_page->list_node_rscn, + &rscn_mgr->list_free_rscn_page); + + rscn_mgr->free_rscn_count++; + port_id_page++; + } + spin_unlock_irqrestore(&rscn_mgr->rscn_id_list_lock, flag); + + return ret; +} + +static void unf_free_rscn_pool(struct unf_lport_s *v_lport) +{ + struct unf_disc_s *disc = NULL; + + UNF_CHECK_VALID(0x663, UNF_TRUE, v_lport, return); + + disc = &v_lport->disc; + if (disc->rscn_mgr.rscn_pool_add) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_INFO, + "[info]Port(0x%x) free RSCN pool", + v_lport->nport_id); + + vfree(disc->rscn_mgr.rscn_pool_add); + disc->rscn_mgr.rscn_pool_add = NULL; + } +} + +static unsigned int unf_init_rscn_mgr(struct unf_lport_s *v_lport) +{ + struct unf_rscn_mg_s *rscn_mgr = NULL; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x664, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + rscn_mgr = &v_lport->disc.rscn_mgr; + + /* free RSCN page list */ + INIT_LIST_HEAD(&rscn_mgr->list_free_rscn_page); + /* busy RSCN page list */ + INIT_LIST_HEAD(&rscn_mgr->list_using_rscn_page); + spin_lock_init(&rscn_mgr->rscn_id_list_lock); + rscn_mgr->free_rscn_count = 0; + rscn_mgr->pfn_unf_get_free_rscn_node = unf_get_free_rscn_node; + rscn_mgr->pfn_unf_release_rscn_node = unf_release_rscn_node; + + ret = 
unf_init_rscn_pool(v_lport); + return ret; +} + +static void unf_destroy_rscn_mgr(struct unf_lport_s *v_lport) +{ + struct unf_rscn_mg_s *rscn_mgr = NULL; + + UNF_CHECK_VALID(0x665, UNF_TRUE, v_lport, return); + rscn_mgr = &v_lport->disc.rscn_mgr; + + rscn_mgr->free_rscn_count = 0; + rscn_mgr->pfn_unf_get_free_rscn_node = NULL; + rscn_mgr->pfn_unf_release_rscn_node = NULL; + + unf_free_rscn_pool(v_lport); +} + +static unsigned int unf_init_disc_rport_pool(struct unf_lport_s *v_lport) +{ + struct unf_disc_rport_mg_s *disc_mgr = NULL; + struct unf_disc_rport_s *disc_rport = NULL; + unsigned int i = 0; + unsigned int max_login = 0; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x662, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + max_login = v_lport->low_level_func.lport_cfg_items.max_login; + disc_mgr = &v_lport->disc.disc_rport_mgr; + + /* Alloc R_Port Disc Pool buffer (address) */ + disc_mgr->disc_pool_add = vmalloc(max_login * + sizeof(struct unf_disc_rport_s)); + if (!disc_mgr->disc_pool_add) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Port(0x%x) allocate disc RPort pool failed", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + memset(disc_mgr->disc_pool_add, 0, + (max_login * sizeof(struct unf_disc_rport_s))); + + /* Add R_Port to (free) DISC R_Port Pool */ + spin_lock_irqsave(&v_lport->disc.rport_busy_pool_lock, flag); + disc_rport = (struct unf_disc_rport_s *)(disc_mgr->disc_pool_add); + for (i = 0; i < max_login; i++) { + /* Add tail to list_disc_Rport_pool */ + list_add_tail(&disc_rport->entry_rport, + &disc_mgr->list_disc_rports_pool); + + disc_rport++; + } + spin_unlock_irqrestore(&v_lport->disc.rport_busy_pool_lock, flag); + + return RETURN_OK; +} + +static void unf_free_disc_rport_pool(struct unf_lport_s *v_lport) +{ + struct unf_disc_s *disc = NULL; + + UNF_CHECK_VALID(0x663, UNF_TRUE, v_lport, return); + + disc = &v_lport->disc; + if (disc->disc_rport_mgr.disc_pool_add) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_INFO, + "[info]Port(0x%x) free disc RPort pool", + v_lport->port_id); + + vfree(disc->disc_rport_mgr.disc_pool_add); + disc->disc_rport_mgr.disc_pool_add = NULL; + } +} + +static int unf_discover_port_info(void *v_arg_in) +{ + struct unf_disc_gs_event_info *gs_info = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + + UNF_CHECK_VALID(0x2250, UNF_TRUE, v_arg_in, return UNF_RETURN_ERROR); + + gs_info = (struct unf_disc_gs_event_info *)v_arg_in; + lport = (struct unf_lport_s *)gs_info->lport; + rport = (struct unf_rport_s *)gs_info->rport; + + switch (gs_info->entype) { + case UNF_DISC_GET_PORT_NAME: + ret = unf_send_gpn_id(lport, rport, gs_info->rport_id); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) send GPN_ID failed RPort(0x%x)", + lport->nport_id, gs_info->rport_id); + unf_rcv_gpn_id_rsp_unknown(lport, gs_info->rport_id); + } + break; + case UNF_DISC_GET_FEATURE: + ret = unf_send_gff_id(lport, rport, gs_info->rport_id); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) send GFF_ID failed to get RPort(0x%x)'s feature", + lport->port_id, gs_info->rport_id); + + unf_rcv_gff_id_rsp_unknown(lport, gs_info->rport_id); + } + break; + case UNF_DISC_GET_NODE_NAME: + ret = unf_send_gnn_id(lport, rport, gs_info->rport_id); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + 
"[warn]Port(0x%x) GNN_ID send failed with NPort ID(0x%x)", + lport->port_id, gs_info->rport_id); + + /* NOTE: Continue to next stage */ + unf_rcv_gnn_id_rsp_unknown(lport, rport, + gs_info->rport_id); + } + break; + default: + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, UNF_ERR, + "[err]Send GS packet type(0x%x) is unknown", + gs_info->entype); + } + + kfree(gs_info); + + return (int)ret; +} + +unsigned int unf_get_and_post_disc_event(void *v_lport, + void *v_sns_port, + unsigned int v_nport_id, + enum unf_disc_type_e v_en_type) +{ + struct unf_disc_gs_event_info *gs_info = NULL; + unsigned long flag = 0; + struct unf_lport_s *root_lport = NULL; + struct unf_lport_s *lport = NULL; + struct unf_disc_manage_info_s *disc_info = NULL; + + UNF_CHECK_VALID(0x654, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x654, UNF_TRUE, v_sns_port, return UNF_RETURN_ERROR); + + lport = (struct unf_lport_s *)v_lport; + + if (lport->link_up == UNF_PORT_LINK_DOWN) + return RETURN_OK; + + root_lport = lport->root_lport; + disc_info = &root_lport->disc.disc_thread_info; + + if (disc_info->b_thread_exit == UNF_TRUE) + return RETURN_OK; + + gs_info = kmalloc(sizeof(struct unf_disc_gs_event_info), GFP_ATOMIC); + if (!gs_info) + return UNF_RETURN_ERROR; + + gs_info->entype = v_en_type; + gs_info->lport = v_lport; + gs_info->rport = v_sns_port; + gs_info->rport_id = v_nport_id; + + INIT_LIST_HEAD(&gs_info->list_entry); + + spin_lock_irqsave(&disc_info->disc_event_list_lock, flag); + list_add_tail(&gs_info->list_entry, &disc_info->list_head); + spin_unlock_irqrestore(&disc_info->disc_event_list_lock, flag); + wake_up_process(disc_info->data_thread); + return RETURN_OK; +} + +static int unf_disc_event_process(void *v_arg) +{ + struct list_head *node = NULL; + struct unf_disc_gs_event_info *gs_info = NULL; + unsigned long flags = 0; + struct unf_disc_s *disc = (struct unf_disc_s *)v_arg; + struct unf_disc_manage_info_s *disc_info = &disc->disc_thread_info; + + UNF_REFERNCE_VAR(v_arg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, + UNF_INFO, + "Port(0x%x) enter discovery thread.", + disc->lport->port_id); + + while (!kthread_should_stop()) { + if (disc_info->b_thread_exit == UNF_TRUE) + break; + + spin_lock_irqsave(&disc_info->disc_event_list_lock, flags); + if ((list_empty(&disc_info->list_head) == UNF_TRUE) || + (atomic_read(&disc_info->disc_contrl_size) == 0)) { + spin_unlock_irqrestore(&disc_info->disc_event_list_lock, + flags); + + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout((long)msecs_to_jiffies(1000)); + } else { + node = (&disc_info->list_head)->next; + list_del_init(node); + gs_info = list_entry(node, + struct unf_disc_gs_event_info, + list_entry); + spin_unlock_irqrestore(&disc_info->disc_event_list_lock, + flags); + unf_discover_port_info(gs_info); + } + } + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EVENT, + UNF_MAJOR, + "Port(0x%x) discovery thread over.", disc->lport->port_id); + + return RETURN_OK; +} + +void unf_flush_disc_event(void *v_disc, void *v_vport) +{ + struct unf_disc_s *disc = (struct unf_disc_s *)v_disc; + struct unf_disc_manage_info_s *disc_info = NULL; + struct list_head *list = NULL; + struct list_head *list_tmp = NULL; + struct unf_disc_gs_event_info *gs_info = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x2249, UNF_TRUE, v_disc, return); + + disc_info = &disc->disc_thread_info; + + spin_lock_irqsave(&disc_info->disc_event_list_lock, flag); + list_for_each_safe(list, list_tmp, &disc_info->list_head) { + gs_info = list_entry(list, struct 
unf_disc_gs_event_info,
+ list_entry);
+
+ if (!v_vport || gs_info->lport == v_vport) {
+ list_del_init(&gs_info->list_entry);
+ kfree(gs_info);
+ }
+ }
+
+ if (!v_vport)
+ atomic_set(&disc_info->disc_contrl_size, UNF_MAX_GS_SEND_NUM);
+
+ spin_unlock_irqrestore(&disc_info->disc_event_list_lock, flag);
+}
+
+void unf_disc_ctrl_size_inc(void *v_lport, unsigned int v_cmnd)
+{
+ struct unf_lport_s *lport = NULL;
+
+ UNF_CHECK_VALID(0x2249, UNF_TRUE, v_lport, return);
+ lport = (struct unf_lport_s *)v_lport;
+ lport = lport->root_lport;
+ UNF_CHECK_VALID(0x2249, UNF_TRUE, lport, return);
+
+ if (atomic_read(&lport->disc.disc_thread_info.disc_contrl_size) ==
+ UNF_MAX_GS_SEND_NUM)
+ return;
+
+ if (v_cmnd == NS_GPN_ID || v_cmnd == NS_GNN_ID || v_cmnd == NS_GFF_ID)
+ atomic_inc(&lport->disc.disc_thread_info.disc_contrl_size);
+}
+
+static void unf_destroy_disc_thread(void *v_disc)
+{
+ struct unf_disc_manage_info_s *disc_info = NULL;
+ struct unf_disc_s *disc = (struct unf_disc_s *)v_disc;
+
+ UNF_CHECK_VALID(0x2249, UNF_TRUE, disc, return);
+
+ disc_info = &disc->disc_thread_info;
+
+ disc_info->b_thread_exit = UNF_TRUE;
+ unf_flush_disc_event(disc, NULL);
+
+ wake_up_process(disc_info->data_thread);
+ kthread_stop(disc_info->data_thread);
+ disc_info->data_thread = NULL;
+
+ UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
+ "Port(0x%x) destroyed discovery thread.",
+ disc->lport->port_id);
+}
+
+static unsigned int unf_create_disc_thread(void *v_disc)
+{
+ struct unf_disc_manage_info_s *disc_info = NULL;
+ struct unf_disc_s *disc = (struct unf_disc_s *)v_disc;
+
+ UNF_CHECK_VALID(0x2250, UNF_TRUE, disc, return UNF_RETURN_ERROR);
+
+ /* If the thread cannot be found, apply for a new thread. */
+ disc_info = &disc->disc_thread_info;
+
+ memset(disc_info, 0, sizeof(struct unf_disc_manage_info_s));
+
+ INIT_LIST_HEAD(&disc_info->list_head);
+ spin_lock_init(&disc_info->disc_event_list_lock);
+ atomic_set(&disc_info->disc_contrl_size, UNF_MAX_GS_SEND_NUM);
+
+ disc_info->b_thread_exit = UNF_FALSE;
+ disc_info->data_thread =
+ kthread_create(unf_disc_event_process, disc,
+ "%x_DiscT", disc->lport->port_id);
+
+ if (IS_ERR(disc_info->data_thread) || !disc_info->data_thread) {
+ UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR,
+ "Port(0x%x) create discovery thread(0x%p) failed.",
+ disc->lport->port_id, disc_info->data_thread);
+
+ return UNF_RETURN_ERROR;
+ }
+
+ wake_up_process(disc_info->data_thread);
+
+ UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO,
+ "Port(0x%x) created discovery thread.",
+ disc->lport->port_id);
+
+ return RETURN_OK;
+}
+
+static void unf_disc_ref_cnt_dec(struct unf_disc_s *v_disc)
+{
+ unsigned long flags = 0;
+
+ UNF_CHECK_VALID(0x669, UNF_TRUE, v_disc, return);
+
+ spin_lock_irqsave(&v_disc->rport_busy_pool_lock, flags);
+ if (atomic_dec_and_test(&v_disc->disc_ref_cnt)) {
+ if (v_disc->disc_completion)
+ complete(v_disc->disc_completion);
+ }
+ spin_unlock_irqrestore(&v_disc->rport_busy_pool_lock, flags);
+}
+
+static void unf_lport_disc_timeout(struct work_struct *v_work)
+{
+ struct unf_lport_s *lport = NULL;
+ struct unf_rport_s *rport = NULL;
+ struct unf_disc_s *disc = NULL;
+ enum unf_disc_state_e en_state = UNF_DISC_ST_END;
+ unsigned long flag = 0;
+
+ UNF_CHECK_VALID(0x675, UNF_TRUE, v_work, return);
+
+ disc = container_of(v_work, struct unf_disc_s, disc_work.work);
+ if (!disc) {
+ UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
+ "[warn]Get discover pointer failed");
+
+ return;
+ }
+
+ lport
= disc->lport; + if (!lport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Find Port by discovery work failed"); + + unf_disc_ref_cnt_dec(disc); + return; + } + + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + en_state = disc->en_states; + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + /* 0xfffffc */ + rport = unf_get_rport_by_nport_id(lport, UNF_FC_FID_DIR_SERV); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) find fabric RPort failed", + lport->port_id); + + unf_disc_ref_cnt_dec(disc); + return; + } + + switch (en_state) { + case UNF_DISC_ST_START: + break; + + case UNF_DISC_ST_GIDPT_WAIT: + (void)unf_send_gid_pt(lport, rport); + break; + + case UNF_DISC_ST_GIDFT_WAIT: + (void)unf_send_gid_ft(lport, rport); + break; + + case UNF_DISC_ST_END: + break; + + default: + break; + } + + unf_disc_ref_cnt_dec(disc); +} + +unsigned int unf_init_disc_mgr(struct unf_lport_s *v_lport) +{ + struct unf_disc_s *disc = NULL; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x666, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + disc = &v_lport->disc; + disc->max_retry_count = UNF_DISC_RETRY_TIMES; + disc->retry_count = 0; + disc->disc_flag = UNF_DISC_NONE; + INIT_LIST_HEAD(&disc->list_busy_rports); /* busy RPort pool list */ + /* delete RPort pool list */ + INIT_LIST_HEAD(&disc->list_delete_rports); + /* destroy RPort pool list */ + INIT_LIST_HEAD(&disc->list_destroy_rports); + spin_lock_init(&disc->rport_busy_pool_lock); + + disc->disc_rport_mgr.disc_pool_add = NULL; + /* free disc RPort pool */ + INIT_LIST_HEAD(&disc->disc_rport_mgr.list_disc_rports_pool); + /* busy disc RPort pool */ + INIT_LIST_HEAD(&disc->disc_rport_mgr.list_disc_rport_busy); + + disc->disc_completion = NULL; + disc->lport = v_lport; + INIT_DELAYED_WORK(&disc->disc_work, unf_lport_disc_timeout); + disc->unf_disc_temp.pfn_unf_disc_start = unf_disc_start; + disc->unf_disc_temp.pfn_unf_disc_stop = unf_disc_stop; + disc->unf_disc_temp.pfn_unf_disc_callback = unf_disc_callback; + atomic_set(&disc->disc_ref_cnt, 0); + + /* Init RSCN Manager */ + ret = unf_init_rscn_mgr(v_lport); + if (ret != RETURN_OK) + return UNF_RETURN_ERROR; + + if (v_lport != v_lport->root_lport) + return ret; + + ret = unf_create_disc_thread(disc); + if (ret != RETURN_OK) { + unf_destroy_rscn_mgr(v_lport); + + return UNF_RETURN_ERROR; + } + + /* Init R_Port free Pool */ + ret = unf_init_rport_pool(v_lport); + if (ret != RETURN_OK) { + unf_destroy_disc_thread(disc); + unf_destroy_rscn_mgr(v_lport); + + return UNF_RETURN_ERROR; + } + + /* Init R_Port free disc Pool */ + ret = unf_init_disc_rport_pool(v_lport); + if (ret != RETURN_OK) { + unf_destroy_disc_thread(disc); + unf_free_rport_pool(v_lport); + unf_destroy_rscn_mgr(v_lport); + + return UNF_RETURN_ERROR; + } + + return ret; +} + +static void unf_wait_disc_complete(struct unf_lport_s *v_lport) +{ + struct unf_disc_s *disc = NULL; + int wait = UNF_FALSE; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + unsigned long long time_out = 0; + + struct completion disc_completion = + COMPLETION_INITIALIZER(disc_completion); + + disc = &v_lport->disc; + + UNF_DELAYED_WORK_SYNC(ret, v_lport->port_id, &disc->disc_work, + "Disc_work"); + if (ret == RETURN_OK) + unf_disc_ref_cnt_dec(disc); + + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + if (atomic_read(&disc->disc_ref_cnt) != 0) { + disc->disc_completion = &disc_completion; + wait = UNF_TRUE; + } + 
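+ /* disc_completion is published under rport_busy_pool_lock, and
+ * unf_disc_ref_cnt_dec() tests it under the same lock after its
+ * atomic_dec_and_test(), so the final reference drop cannot slip
+ * in between the check above and the wait below.
+ */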
spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + if (wait == UNF_TRUE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) begin to wait for discover completion(0x%lx)", + v_lport->port_id, jiffies); + + time_out = wait_for_completion_timeout( + disc->disc_completion, + msecs_to_jiffies(UNF_OS_REMOVE_CARD_TIMEOUT)); + if (time_out == 0) + unf_cmmark_dirty_mem(v_lport, + UNF_LPORT_DIRTY_FLAG_DISC_DIRTY); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) wait for discover completion end(0x%lx)", + v_lport->port_id, jiffies); + + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + disc->disc_completion = NULL; + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + } +} + +void unf_disc_mgr_destroy(void *v_lport) +{ + struct unf_disc_s *disc = NULL; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x672, UNF_TRUE, v_lport, return); + lport = (struct unf_lport_s *)v_lport; + + disc = &lport->disc; + disc->retry_count = 0; + disc->unf_disc_temp.pfn_unf_disc_start = NULL; + disc->unf_disc_temp.pfn_unf_disc_stop = NULL; + disc->unf_disc_temp.pfn_unf_disc_callback = NULL; + + unf_free_disc_rport_pool(lport); + unf_destroy_rscn_mgr(lport); + unf_wait_disc_complete(lport); + + if (lport != lport->root_lport) + return; + + unf_destroy_disc_thread(disc); + unf_free_rport_pool(lport); + lport->destroy_step = UNF_LPORT_DESTROY_STEP_6_DESTROY_DISC_MGR; +} + +void unf_disc_error_recovery(void *v_lport) +{ + struct unf_rport_s *rport = NULL; + struct unf_disc_s *disc = NULL; + unsigned long delay = 0; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x673, UNF_TRUE, v_lport, return); + + lport = (struct unf_lport_s *)v_lport; + disc = &lport->disc; + + rport = unf_get_rport_by_nport_id(lport, UNF_FC_FID_DIR_SERV); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) find RPort failed", + lport->port_id); + return; + } + + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + + /* Delay work is pending */ + if (delayed_work_pending(&disc->disc_work)) { + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) disc_work is running and do nothing", + lport->port_id); + return; + } + + /* Continue to retry */ + if (disc->retry_count < disc->max_retry_count) { + disc->retry_count++; + delay = (unsigned long)lport->ed_tov; + + if (queue_delayed_work(unf_work_queue, &disc->disc_work, + (unsigned long)msecs_to_jiffies( + (unsigned int)delay))) { + atomic_inc(&disc->disc_ref_cnt); + } + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + } else { + /* Go to next stage */ + if (disc->en_states == UNF_DISC_ST_GIDPT_WAIT) { + /* GID_PT_WAIT --->>> Send GID_FT */ + unf_disc_state_ma(lport, UNF_EVENT_DISC_RETRY_TIMEOUT); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, + flag); + + while ((ret != RETURN_OK) && + (disc->retry_count < disc->max_retry_count)) { + ret = unf_send_gid_ft(lport, rport); + disc->retry_count++; + } + } else if (disc->en_states == UNF_DISC_ST_GIDFT_WAIT) { + /* GID_FT_WAIT --->>> Send LOGO */ + unf_disc_state_ma(lport, UNF_EVENT_DISC_RETRY_TIMEOUT); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, + flag); + } else { + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, + flag); + } + } +} + +enum unf_disc_state_e unf_disc_stat_start(enum unf_disc_state_e v_old_state, 
+ enum unf_disc_event_e v_en_event) +{ + enum unf_disc_state_e en_next_state = UNF_DISC_ST_END; + + if (v_en_event == UNF_EVENT_DISC_NORMAL_ENTER) + en_next_state = UNF_DISC_ST_GIDPT_WAIT; + else + en_next_state = v_old_state; + + return en_next_state; +} + +enum unf_disc_state_e unf_disc_stat_gid_pt_wait( + enum unf_disc_state_e v_old_state, + enum unf_disc_event_e v_en_event) +{ + enum unf_disc_state_e en_next_state = UNF_DISC_ST_END; + + switch (v_en_event) { + case UNF_EVENT_DISC_FAILED: + en_next_state = UNF_DISC_ST_GIDPT_WAIT; + break; + + case UNF_EVENT_DISC_RETRY_TIMEOUT: + en_next_state = UNF_DISC_ST_GIDFT_WAIT; + break; + + case UNF_EVENT_DISC_SUCCESS: + en_next_state = UNF_DISC_ST_END; + break; + + case UNF_EVENT_DISC_LINKDOWN: + en_next_state = UNF_DISC_ST_START; + break; + + default: + en_next_state = v_old_state; + break; + } + + return en_next_state; +} + +enum unf_disc_state_e unf_disc_stat_gid_ft_wait( + enum unf_disc_state_e v_old_state, + enum unf_disc_event_e v_en_event) +{ + enum unf_disc_state_e en_next_state = UNF_DISC_ST_END; + + switch (v_en_event) { + case UNF_EVENT_DISC_FAILED: + en_next_state = UNF_DISC_ST_GIDFT_WAIT; + break; + + case UNF_EVENT_DISC_RETRY_TIMEOUT: + en_next_state = UNF_DISC_ST_END; + break; + + case UNF_EVENT_DISC_SUCCESS: + en_next_state = UNF_DISC_ST_END; + break; + + case UNF_EVENT_DISC_LINKDOWN: + en_next_state = UNF_DISC_ST_START; + break; + + default: + en_next_state = v_old_state; + break; + } + + return en_next_state; +} + +enum unf_disc_state_e unf_disc_stat_end(enum unf_disc_state_e v_old_state, + enum unf_disc_event_e v_en_event) +{ + enum unf_disc_state_e en_next_state = UNF_DISC_ST_END; + + if (v_en_event == UNF_EVENT_DISC_LINKDOWN) + en_next_state = UNF_DISC_ST_START; + else + en_next_state = v_old_state; + + return en_next_state; +} + +void unf_disc_state_ma(struct unf_lport_s *v_lport, + enum unf_disc_event_e v_en_event) +{ + struct unf_disc_s *disc = NULL; + enum unf_disc_state_e en_old_state = UNF_DISC_ST_START; + enum unf_disc_state_e en_next_state = UNF_DISC_ST_START; + + UNF_CHECK_VALID(0x674, UNF_TRUE, v_lport, return); + + disc = &v_lport->disc; + en_old_state = disc->en_states; + + switch (disc->en_states) { + case UNF_DISC_ST_START: + en_next_state = unf_disc_stat_start(en_old_state, v_en_event); + break; + + case UNF_DISC_ST_GIDPT_WAIT: + en_next_state = unf_disc_stat_gid_pt_wait(en_old_state, + v_en_event); + break; + + case UNF_DISC_ST_GIDFT_WAIT: + en_next_state = unf_disc_stat_gid_ft_wait(en_old_state, + v_en_event); + break; + + case UNF_DISC_ST_END: + en_next_state = unf_disc_stat_end(en_old_state, v_en_event); + break; + + default: + en_next_state = en_old_state; + break; + } + + unf_set_disc_state(disc, en_next_state); +} diff --git a/drivers/scsi/huawei/hifc/unf_disc.h b/drivers/scsi/huawei/hifc/unf_disc.h new file mode 100644 index 000000000000..45fc3e011974 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_disc.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __UNF_DISC_H__ +#define __UNF_DISC_H__ + +#define UNF_DISC_RETRY_TIMES 3 +#define UNF_DISC_NONE 0 +#define UNF_DISC_FABRIC 1 +#define UNF_DISC_LOOP 2 + +enum unf_disc_state_e { + UNF_DISC_ST_START = 0x3000, + UNF_DISC_ST_GIDPT_WAIT, + UNF_DISC_ST_GIDFT_WAIT, + UNF_DISC_ST_END +}; + +enum unf_disc_event_e { + UNF_EVENT_DISC_NORMAL_ENTER = 0x8000, + UNF_EVENT_DISC_FAILED = 0x8001, + UNF_EVENT_DISC_SUCCESS = 0x8002, + 
UNF_EVENT_DISC_RETRY_TIMEOUT = 0x8003, + UNF_EVENT_DISC_LINKDOWN = 0x8004 +}; + +enum unf_disc_type_e { + UNF_DISC_GET_PORT_NAME = 0, + UNF_DISC_GET_NODE_NAME, + UNF_DISC_GET_FEATURE +}; + +struct unf_disc_gs_event_info { + void *lport; + void *rport; + unsigned int rport_id; + enum unf_disc_type_e entype; + struct list_head list_entry; +}; + +unsigned int unf_get_and_post_disc_event(void *v_lport, + void *v_sns_port, + unsigned int v_nport_id, + enum unf_disc_type_e v_en_type); + +void unf_flush_disc_event(void *v_disc, void *v_vport); +void unf_disc_error_recovery(void *v_lport); +void unf_disc_mgr_destroy(void *v_lport); +void unf_disc_ctrl_size_inc(void *v_lport, unsigned int v_cmnd); + +#endif diff --git a/drivers/scsi/huawei/hifc/unf_event.c b/drivers/scsi/huawei/hifc/unf_event.c new file mode 100644 index 000000000000..205824b633a4 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_event.c @@ -0,0 +1,557 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#include "unf_log.h" +#include "unf_common.h" +#include "unf_event.h" +#include "unf_lport.h" + +struct unf_event_list fc_event_list; +struct unf_global_event_queue global_event_queue; + +/* Max global event node */ +#define UNF_MAX_GLOBAL_ENENT_NODE 24 + +unsigned int unf_init_event_msg(struct unf_lport_s *v_lport) +{ + struct unf_event_mgr *event_mgr = NULL; + struct unf_cm_event_report *event_node = NULL; + unsigned int i; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x770, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + event_mgr = &v_lport->event_mgr; + + /* Get and Initial Event Node resource */ + event_mgr->pmem_add = + vmalloc((size_t)event_mgr->free_event_count * + sizeof(struct unf_cm_event_report)); + if (!event_mgr->pmem_add) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Port(0x%x) allocate event manager failed", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + memset(event_mgr->pmem_add, 0, + ((size_t)event_mgr->free_event_count * + sizeof(struct unf_cm_event_report))); + + event_node = (struct unf_cm_event_report *)(event_mgr->pmem_add); + + spin_lock_irqsave(&event_mgr->port_event_lock, flags); + for (i = 0; i < event_mgr->free_event_count; i++) { + INIT_LIST_HEAD(&event_node->list_entry); + list_add_tail(&event_node->list_entry, + &event_mgr->list_free_event); + event_node++; + } + spin_unlock_irqrestore(&event_mgr->port_event_lock, flags); + + return RETURN_OK; +} + +static void unf_del_eventcenter(struct unf_lport_s *v_lport) +{ + struct unf_event_mgr *event_mgr = NULL; + + UNF_CHECK_VALID(0x771, UNF_TRUE, v_lport, return); + + event_mgr = &v_lport->event_mgr; + event_mgr->pfn_unf_get_free_event = NULL; + event_mgr->pfn_unf_release_event = NULL; + event_mgr->pfn_unf_post_event = NULL; +} + +void unf_init_event_node(struct unf_cm_event_report *v_event_node) +{ + UNF_CHECK_VALID(0x776, UNF_TRUE, v_event_node, return); + + v_event_node->event = UNF_EVENT_TYPE_REQUIRE; + v_event_node->event_asy_flag = UNF_EVENT_ASYN; + v_event_node->delay_times = 0; + v_event_node->para_in = NULL; + v_event_node->para_out = NULL; + v_event_node->result = 0; + v_event_node->lport = NULL; + v_event_node->pfn_unf_event_task = NULL; + v_event_node->pfn_unf_event_recovery_strategy = NULL; + v_event_node->pfn_unf_event_alarm_strategy = NULL; +} + +struct unf_cm_event_report *unf_get_free_event_node(void *v_lport) +{ + struct unf_event_mgr *event_mgr = NULL; + struct unf_cm_event_report *event_node = NULL; + struct 
list_head *list_node = NULL;
+ struct unf_lport_s *root_lport = NULL;
+ unsigned long flags = 0;
+
+ UNF_CHECK_VALID(0x777, UNF_TRUE, v_lport, return NULL);
+ root_lport = (struct unf_lport_s *)v_lport;
+ root_lport = root_lport->root_lport;
+
+ if (unlikely(atomic_read(&root_lport->port_no_operater_flag) ==
+ UNF_LPORT_NOP))
+ return NULL;
+
+ /* Get EventMgr from Lport */
+ event_mgr = &root_lport->event_mgr;
+
+ /* Get a free node from the free pool */
+ spin_lock_irqsave(&event_mgr->port_event_lock, flags);
+ if (list_empty(&event_mgr->list_free_event)) {
+ UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
+ "[warn]Port(0x%x) has no free event node",
+ root_lport->port_id);
+
+ spin_unlock_irqrestore(&event_mgr->port_event_lock, flags);
+ return NULL;
+ }
+
+ list_node = (&event_mgr->list_free_event)->next;
+ list_del(list_node);
+ event_mgr->free_event_count--;
+ event_node = list_entry(list_node, struct unf_cm_event_report,
+ list_entry);
+
+ /* Initial event node */
+ unf_init_event_node(event_node);
+ spin_unlock_irqrestore(&event_mgr->port_event_lock, flags);
+
+ return event_node;
+}
+
+void unf_check_event_mgr_status(struct unf_event_mgr *v_event_mgr)
+{
+ unsigned long flag = 0;
+
+ UNF_CHECK_VALID(0x773, UNF_TRUE, v_event_mgr, return);
+
+ spin_lock_irqsave(&v_event_mgr->port_event_lock, flag);
+ if ((v_event_mgr->emg_completion) &&
+ (v_event_mgr->free_event_count == UNF_MAX_EVENT_NODE)) {
+ complete(v_event_mgr->emg_completion);
+ }
+ spin_unlock_irqrestore(&v_event_mgr->port_event_lock, flag);
+}
+
+void unf_release_event(void *v_lport, void *v_event_node)
+{
+ struct unf_event_mgr *event_mgr = NULL;
+ struct unf_lport_s *root_lport = NULL;
+ struct unf_cm_event_report *event_node = NULL;
+ unsigned long flags = 0;
+
+ UNF_CHECK_VALID(0x778, UNF_TRUE, v_lport, return);
+ UNF_CHECK_VALID(0x779, UNF_TRUE, v_event_node, return);
+
+ event_node = (struct unf_cm_event_report *)v_event_node;
+ root_lport = (struct unf_lport_s *)v_lport;
+ root_lport = root_lport->root_lport;
+ event_mgr = &root_lport->event_mgr;
+
+ spin_lock_irqsave(&event_mgr->port_event_lock, flags);
+ event_mgr->free_event_count++;
+ unf_init_event_node(event_node);
+ list_add_tail(&event_node->list_entry, &event_mgr->list_free_event);
+ spin_unlock_irqrestore(&event_mgr->port_event_lock, flags);
+
+ unf_check_event_mgr_status(event_mgr);
+}
+
+void unf_post_event(void *v_lport, void *v_event_node)
+{
+ struct unf_cm_event_report *event_node = NULL;
+ struct unf_chip_manage_info_s *card_thread_info = NULL;
+ struct unf_lport_s *root_lport = NULL;
+ unsigned long flags = 0;
+
+ UNF_CHECK_VALID(0x780, UNF_TRUE, v_event_node, return);
+ event_node = (struct unf_cm_event_report *)v_event_node;
+ UNF_REFERNCE_VAR(v_lport);
+
+ /* If null, post to global event center */
+ if (!v_lport) {
+ spin_lock_irqsave(&fc_event_list.fc_eventlist_lock, flags);
+ fc_event_list.list_num++;
+ list_add_tail(&event_node->list_entry,
+ &fc_event_list.list_head);
+ spin_unlock_irqrestore(&fc_event_list.fc_eventlist_lock,
+ flags);
+
+ wake_up_process(event_thread);
+ } else {
+ root_lport = (struct unf_lport_s *)v_lport;
+ root_lport = root_lport->root_lport;
+ card_thread_info = root_lport->chip_info;
+
+ /* Post to global event center */
+ if (!card_thread_info) {
+ UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EVENT,
+ UNF_WARN,
+ "[warn]Port(0x%x) has strange event with type(0x%x)",
+ root_lport->nport_id, event_node->event);
+
+ spin_lock_irqsave(&fc_event_list.fc_eventlist_lock,
+ flags);
+ fc_event_list.list_num++;
+
list_add_tail(&event_node->list_entry, + &fc_event_list.list_head); + spin_unlock_irqrestore( + &fc_event_list.fc_eventlist_lock, + flags); + + wake_up_process(event_thread); + } else { + spin_lock_irqsave( + &card_thread_info->chip_event_list_lock, + flags); + card_thread_info->list_num++; + list_add_tail(&event_node->list_entry, + &card_thread_info->list_head); + spin_unlock_irqrestore( + &card_thread_info->chip_event_list_lock, + flags); + + wake_up_process(card_thread_info->data_thread); + } + } +} + +unsigned int unf_init_event_center(void *v_lport) +{ + struct unf_event_mgr *event_mgr = NULL; + unsigned int ret = RETURN_OK; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x772, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + lport = (struct unf_lport_s *)v_lport; + + /* Initial Disc manager */ + event_mgr = &lport->event_mgr; + event_mgr->free_event_count = UNF_MAX_EVENT_NODE; + event_mgr->pfn_unf_get_free_event = unf_get_free_event_node; + event_mgr->pfn_unf_release_event = unf_release_event; + event_mgr->pfn_unf_post_event = unf_post_event; + + INIT_LIST_HEAD(&event_mgr->list_free_event); + spin_lock_init(&event_mgr->port_event_lock); + event_mgr->emg_completion = NULL; + + ret = unf_init_event_msg(lport); + return ret; +} + +void unf_wait_event_mgr_complete(struct unf_event_mgr *v_event_mgr) +{ + struct unf_event_mgr *event_mgr = NULL; + int wait = UNF_FALSE; + unsigned long mg_flag = 0; + + struct completion fc_event_completion = + COMPLETION_INITIALIZER(fc_event_completion); + + UNF_CHECK_VALID(0x774, UNF_TRUE, v_event_mgr, return); + event_mgr = v_event_mgr; + + spin_lock_irqsave(&event_mgr->port_event_lock, mg_flag); + if (event_mgr->free_event_count != UNF_MAX_EVENT_NODE) { + event_mgr->emg_completion = &fc_event_completion; + wait = UNF_TRUE; + } + spin_unlock_irqrestore(&event_mgr->port_event_lock, mg_flag); + + if (wait == UNF_TRUE) + wait_for_completion(event_mgr->emg_completion); + + spin_lock_irqsave(&event_mgr->port_event_lock, mg_flag); + event_mgr->emg_completion = NULL; + spin_unlock_irqrestore(&event_mgr->port_event_lock, mg_flag); +} + +unsigned int unf_event_center_destroy(void *v_lport) +{ + struct unf_event_mgr *event_mgr = NULL; + struct list_head *list = NULL; + struct list_head *list_tmp = NULL; + struct unf_cm_event_report *event_node = NULL; + unsigned int ret = RETURN_OK; + unsigned long flag = 0; + unsigned long list_lock_flag = 0; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x775, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + lport = (struct unf_lport_s *)v_lport; + event_mgr = &lport->event_mgr; + + spin_lock_irqsave(&fc_event_list.fc_eventlist_lock, list_lock_flag); + if (!list_empty(&fc_event_list.list_head)) { + list_for_each_safe(list, list_tmp, &fc_event_list.list_head) { + event_node = list_entry(list, + struct unf_cm_event_report, + list_entry); + if (lport == event_node->lport) { + list_del_init(&event_node->list_entry); + if (event_node->event_asy_flag == + UNF_EVENT_SYN) { + event_node->result = UNF_RETURN_ERROR; + complete(&event_node->event_comp); + } + + spin_lock_irqsave(&event_mgr->port_event_lock, + flag); + event_mgr->free_event_count++; + list_add_tail(&event_node->list_entry, + &event_mgr->list_free_event); + spin_unlock_irqrestore( + &event_mgr->port_event_lock, flag); + } + } + } + spin_unlock_irqrestore(&fc_event_list.fc_eventlist_lock, + list_lock_flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) begin to wait event", lport->port_id); + 
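+ /* Wait until every outstanding event node has been returned to
+ * list_free_event (free_event_count back at UNF_MAX_EVENT_NODE);
+ * pmem_add is vfree()d right below, so freeing it earlier would
+ * leave in-flight events pointing into freed memory.
+ */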
unf_wait_event_mgr_complete(event_mgr); + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) wait event process end", lport->port_id); + + unf_del_eventcenter(lport); + vfree(event_mgr->pmem_add); + event_mgr->pmem_add = NULL; + lport->destroy_step = UNF_LPORT_DESTROY_STEP_3_DESTROY_EVENT_CENTER; + + return ret; +} + +static void unf_procee_asyn_event(struct unf_cm_event_report *v_event_node) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct unf_lport_s *lport = (struct unf_lport_s *)v_event_node->lport; + + UNF_CHECK_VALID(0x782, UNF_TRUE, lport, return); + if (v_event_node->pfn_unf_event_task) + ret = (unsigned int) + v_event_node->pfn_unf_event_task(v_event_node->para_in, + v_event_node->para_out); + + if (lport->event_mgr.pfn_unf_release_event) + lport->event_mgr.pfn_unf_release_event(lport, v_event_node); + + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EVENT, UNF_WARN, + "[warn]Port(0x%x) handle event(0x%x) failed", + lport->port_id, v_event_node->event); + } + + UNF_REFERNCE_VAR(ret); +} + +void unf_release_global_event(void *v_event_node) +{ + unsigned long flag = 0; + struct unf_cm_event_report *event_node = NULL; + + UNF_CHECK_VALID(0x784, UNF_TRUE, v_event_node, return); + event_node = (struct unf_cm_event_report *)v_event_node; + unf_init_event_node(event_node); + + spin_lock_irqsave(&global_event_queue.global_eventlist_lock, flag); + global_event_queue.list_number++; + list_add_tail(&event_node->list_entry, + &global_event_queue.global_eventlist); + spin_unlock_irqrestore(&global_event_queue.global_eventlist_lock, + flag); +} + +void unf_handle_event(struct unf_cm_event_report *v_event_node) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned int event = 0; + unsigned int event_asy_flag = UNF_EVENT_ASYN; + + UNF_CHECK_VALID(0x781, UNF_TRUE, v_event_node, return); + UNF_REFERNCE_VAR(ret); + UNF_REFERNCE_VAR(event); + + event = v_event_node->event; + event_asy_flag = v_event_node->event_asy_flag; + + switch (event_asy_flag) { + case UNF_EVENT_SYN: /* synchronous event node */ + case UNF_GLOBAL_EVENT_SYN: + if (v_event_node->pfn_unf_event_task) { + ret = (unsigned int)v_event_node->pfn_unf_event_task( + v_event_node->para_in, + v_event_node->para_out); + } + v_event_node->result = ret; + complete(&v_event_node->event_comp); + break; + case UNF_EVENT_ASYN: /* asynchronous event node */ + unf_procee_asyn_event(v_event_node); + break; + case UNF_GLOBAL_EVENT_ASYN: + if (v_event_node->pfn_unf_event_task) { + ret = (unsigned int)v_event_node->pfn_unf_event_task( + v_event_node->para_in, + v_event_node->para_out); + } + unf_release_global_event(v_event_node); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_EVENT, UNF_WARN, + "[warn]handle global event(0x%x) failed", + event); + } + break; + default: + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EVENT, UNF_WARN, + "[warn]Unknown event(0x%x)", event); + break; + } +} + +unsigned int unf_init_global_event_msg(void) +{ + struct unf_cm_event_report *event_node = NULL; + unsigned int ret = RETURN_OK; + unsigned int i = 0; + unsigned long flag = 0; + + INIT_LIST_HEAD(&global_event_queue.global_eventlist); + spin_lock_init(&global_event_queue.global_eventlist_lock); + global_event_queue.list_number = 0; + + global_event_queue.global_event_add = + vmalloc(UNF_MAX_GLOBAL_ENENT_NODE * + sizeof(struct unf_cm_event_report)); + if (!global_event_queue.global_event_add) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Can't allocate global event 
queue"); + + return UNF_RETURN_ERROR; + } + memset(global_event_queue.global_event_add, 0, + (sizeof(struct unf_cm_event_report) * + UNF_MAX_GLOBAL_ENENT_NODE)); + event_node = (struct unf_cm_event_report *) + (global_event_queue.global_event_add); + + spin_lock_irqsave(&global_event_queue.global_eventlist_lock, flag); + for (i = 0; i < UNF_MAX_GLOBAL_ENENT_NODE; i++) { + INIT_LIST_HEAD(&event_node->list_entry); + list_add_tail(&event_node->list_entry, + &global_event_queue.global_eventlist); + global_event_queue.list_number++; + event_node++; + } + spin_unlock_irqrestore(&global_event_queue.global_eventlist_lock, + flag); + + return ret; +} + +void unf_destroy_global_event_msg(void) +{ + if (global_event_queue.list_number != UNF_MAX_GLOBAL_ENENT_NODE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_EVENT, UNF_CRITICAL, + "[warn]Global event release not complete with remain nodes(0x%x)", + global_event_queue.list_number); + } + + vfree(global_event_queue.global_event_add); +} + +unsigned int unf_schedule_global_event( + void *v_para, + unsigned int v_event_asy_flag, + int (*pfn_unf_event_task)(void *v_argin, void *v_argout)) +{ + struct list_head *list_node = NULL; + struct unf_cm_event_report *event_node = NULL; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x783, UNF_TRUE, pfn_unf_event_task, + return UNF_RETURN_ERROR); + + if ((v_event_asy_flag != UNF_GLOBAL_EVENT_ASYN) && + (v_event_asy_flag != UNF_GLOBAL_EVENT_SYN)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Event async flag(0x%x) abnormity", + v_event_asy_flag); + + return UNF_RETURN_ERROR; + } + + spin_lock_irqsave(&global_event_queue.global_eventlist_lock, flag); + if (list_empty(&global_event_queue.global_eventlist)) { + spin_unlock_irqrestore( + &global_event_queue.global_eventlist_lock, flag); + + return UNF_RETURN_ERROR; + } + + list_node = (&global_event_queue.global_eventlist)->next; + list_del_init(list_node); + global_event_queue.list_number--; + event_node = list_entry(list_node, struct unf_cm_event_report, + list_entry); + spin_unlock_irqrestore(&global_event_queue.global_eventlist_lock, + flag); + + /* Initial global event */ + unf_init_event_node(event_node); + init_completion(&event_node->event_comp); + event_node->event_asy_flag = v_event_asy_flag; + event_node->pfn_unf_event_task = pfn_unf_event_task; + event_node->para_in = (void *)v_para; + event_node->para_out = NULL; + + unf_post_event(NULL, event_node); + + if (v_event_asy_flag == UNF_GLOBAL_EVENT_SYN) { + /* must wait for complete */ + wait_for_completion(&event_node->event_comp); + ret = event_node->result; + unf_release_global_event(event_node); + } else { + ret = RETURN_OK; + } + + return ret; +} + +struct unf_cm_event_report *unf_get_one_event_node(void *v_lport) +{ + struct unf_lport_s *lport = (struct unf_lport_s *)v_lport; + + UNF_CHECK_VALID(0x785, UNF_TRUE, v_lport, return NULL); + UNF_CHECK_VALID(0x786, UNF_TRUE, + lport->event_mgr.pfn_unf_get_free_event, + return NULL); + + return lport->event_mgr.pfn_unf_get_free_event((void *)lport); +} + +void unf_post_one_event_node(void *v_lport, + struct unf_cm_event_report *v_event) +{ + struct unf_lport_s *lport = (struct unf_lport_s *)v_lport; + + UNF_CHECK_VALID(0x787, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x788, UNF_TRUE, v_event, return); + + UNF_CHECK_VALID(0x789, UNF_TRUE, lport->event_mgr.pfn_unf_post_event, + return); + UNF_CHECK_VALID(0x790, UNF_TRUE, v_event, return); + + lport->event_mgr.pfn_unf_post_event((void *)lport, 
v_event); +} + diff --git a/drivers/scsi/huawei/hifc/unf_event.h b/drivers/scsi/huawei/hifc/unf_event.h new file mode 100644 index 000000000000..4f78d1c538b8 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_event.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __UNF_EVENT_H__ +#define __UNF_EVENT_H__ +#include "hifc_knl_adp.h" + +enum unf_poll_flag { + UNF_POLL_CHIPERROR_FLAG = 0, /* CHIP ERROR POLL */ + UNF_POLL_ERROR_CODE, /* CODE ERROR POLL */ + UNF_POLL_SFP_FLAG, /* SFP POLL */ + UNF_POLL_BUTT +}; + +#define UNF_MAX_EVENT_NODE 256 + +enum unf_event_type { + UNF_EVENT_TYPE_ALARM = 0, /* Alarm */ + UNF_EVENT_TYPE_REQUIRE, /* Require */ + UNF_EVENT_TYPE_RECOVERY, /* Recovery */ + UNF_EVENT_TYPE_BUTT +}; + +struct unf_cm_event_report { + /* event type */ + unsigned int event; + + /* ASY flag */ + unsigned int event_asy_flag; + + /* Delay times,must be async event */ + unsigned int delay_times; + + struct list_head list_entry; + + void *lport; + + /* parameter */ + void *para_in; + void *para_out; + unsigned int result; + + /* recovery strategy */ + int (*pfn_unf_event_task)(void *v_argin, void *v_argout); + + /* recovery strategy */ + int (*pfn_unf_event_recovery_strategy)(void *); + + /* alarm strategy */ + int (*pfn_unf_event_alarm_strategy)(void *); + + struct completion event_comp; +}; + +struct unf_event_mgr { + spinlock_t port_event_lock; + unsigned int free_event_count; + + struct list_head list_free_event; + + struct completion *emg_completion; + + void *pmem_add; + struct unf_cm_event_report *(*pfn_unf_get_free_event)(void *v_lport); + void (*pfn_unf_release_event)(void *v_lport, void *v_event_node); + void (*pfn_unf_post_event)(void *v_lport, void *v_event_node); +}; + +struct unf_global_event_queue { + void *global_event_add; + unsigned int list_number; + struct list_head global_eventlist; + spinlock_t global_eventlist_lock; +}; + +struct unf_event_list { + struct list_head list_head; + spinlock_t fc_eventlist_lock; + unsigned int list_num; /* list node number */ +}; + +void unf_handle_event(struct unf_cm_event_report *v_event_node); +unsigned int unf_init_global_event_msg(void); +void unf_destroy_global_event_msg(void); +unsigned int unf_schedule_global_event( + void *v_para, + unsigned int v_event_asy_flag, + int (*pfn_unf_event_task)(void *v_argin, void *v_argout)); + +struct unf_cm_event_report *unf_get_one_event_node(void *v_lport); +void unf_post_one_event_node(void *v_lport, + struct unf_cm_event_report *v_event); +unsigned int unf_event_center_destroy(void *v_lport); +unsigned int unf_init_event_center(void *v_lport); + +extern struct task_struct *event_thread; +extern struct unf_global_event_queue global_event_queue; +extern struct unf_event_list fc_event_list; +#endif diff --git a/drivers/scsi/huawei/hifc/unf_exchg.c b/drivers/scsi/huawei/hifc/unf_exchg.c new file mode 100644 index 000000000000..f3234a9edc22 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_exchg.c @@ -0,0 +1,3632 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ + +#include "unf_log.h" +#include "unf_common.h" +#include "unf_exchg.h" +#include "unf_rport.h" +#include "unf_service.h" +#include "unf_io.h" + +#define UNF_DEL_XCHG_TIMER_SAFE(v_xchg) \ + do { \ + if (cancel_delayed_work(&((v_xchg)->timeout_work))) { \ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_IO_ATT, \ + UNF_MAJOR, \ + 
"Exchange(0x%p) is free, but timer is pending.", \ + v_xchg); \ + } else { \ + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_IO_ATT, \ + UNF_CRITICAL, \ + "Exchange(0x%p) is free, but timer is running.", \ + v_xchg); \ + } \ + } while (0) + +#define UNF_XCHG_IS_ELS_REPLY(v_xchg) \ + ((((v_xchg)->cmnd_code & 0x0ffff) == ELS_ACC) || \ + (((v_xchg)->cmnd_code & 0x0ffff) == ELS_RJT)) + +static struct unf_ioflow_id_s io_stage[] = { + { "XCHG_ALLOC" }, + { "TGT_RECEIVE_ABTS" }, + { "TGT_ABTS_DONE" }, + { "TGT_IO_SRR" }, + { "SFS_RESPONSE" }, + { "SFS_TIMEOUT" }, + { "INI_SEND_CMND" }, + { "INI_RESPONSE_DONE" }, + { "INI_EH_ABORT" }, + { "INI_EH_DEVICE_RESET" }, + { "INI_EH_BLS_DONE" }, + { "INI_IO_TIMEOUT" }, + { "INI_REQ_TIMEOUT" }, + { "XCHG_CANCEL_TIMER" }, + { "XCHG_FREE_XCHG" }, + { "SEND_ELS" }, + { "IO_XCHG_WAIT" }, +}; + +void unf_wakeup_scsi_task_cmnd(struct unf_lport_s *v_lport) +{ + struct list_head *node = NULL; + struct list_head *next_node = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned long hot_pool_lock_flags = 0; + unsigned long xchg_flag = 0; + struct unf_xchg_mgr_s *xchg_mgr = NULL; + unsigned int i = 0; + + UNF_CHECK_VALID(0x850, UNF_TRUE, v_lport, return); + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + xchg_mgr = unf_get_xchg_mgr_by_lport(v_lport, i); + + if (!xchg_mgr) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_EVENT, UNF_MINOR, + "Can't find LPort(0x%x) MgrIdx %u exchange manager.", + v_lport->port_id, i); + continue; + } + + spin_lock_irqsave(&xchg_mgr->hot_pool->xchg_hot_pool_lock, + hot_pool_lock_flags); + list_for_each_safe(node, next_node, + &xchg_mgr->hot_pool->ini_busylist) { + xchg = list_entry(node, struct unf_xchg_s, + list_xchg_entry); + + spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flag); + if (INI_IO_STATE_UPTASK & xchg->io_state && + (atomic_read(&xchg->ref_cnt) > 0)) { + UNF_SET_SCSI_CMND_RESULT(xchg, UNF_IO_SUCCESS); + up(&xchg->task_sema); + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_EVENT, UNF_MINOR, + "Wake up task command exchange(0x%p), Hot Pool Tag(0x%x).", + xchg, xchg->hot_pool_tag); + } + spin_unlock_irqrestore(&xchg->xchg_state_lock, + xchg_flag); + } + + spin_unlock_irqrestore(&xchg_mgr->hot_pool->xchg_hot_pool_lock, + hot_pool_lock_flags); + } +} + +void unf_cm_xchg_mgr_abort_io_by_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int v_sid, unsigned int v_did, + unsigned int v_extra_io_state) +{ + /* + * for target session: set ABORT + * 1. R_Port remove + * 2. Send PLOGI_ACC callback + * 3. RCVD PLOGI + * 4. RCVD LOGO + */ + UNF_CHECK_VALID(0x852, UNF_TRUE, v_lport, return); + + if (v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_io_xchg_abort) { + /* The SID/DID of the Xchg is in reverse direction in + * different phases. 
Therefore, the reverse direction + * needs to be considered + */ + v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_io_xchg_abort( + v_lport, + v_rport, + v_sid, v_did, + v_extra_io_state); + v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_io_xchg_abort( + v_lport, v_rport, + v_did, v_sid, + v_extra_io_state); + } +} + +void unf_cm_xchg_mgr_abort_sfs_by_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int v_sid, unsigned int v_did) +{ + UNF_CHECK_VALID(0x990, UNF_TRUE, v_lport, return); + + if (v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_sfs_xchg_abort) { + /* The SID/DID of the Xchg is in reverse direction in different + * phases, therefore, the reverse direction + * needs to be considered + */ + v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_sfs_xchg_abort(v_lport, + v_rport, + v_sid, + v_did); + v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_sfs_xchg_abort(v_lport, + v_rport, + v_did, + v_sid); + } +} + +void unf_cm_xchg_abort_by_lun(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned long long v_lun_id, + void *v_tm_xchg, + int v_abort_all_lun_flag) +{ + /* + * LUN Reset: set UP_ABORT tag, with: + * INI_Busy_list, IO_Wait_list, + * IO_Delay_list, IO_Delay_transfer_list + */ + void (*unf_xchg_abort_by_lun)(void*, void*, unsigned long long, + void*, int) = NULL; + + UNF_CHECK_VALID(0x853, UNF_TRUE, v_lport, return); + + unf_xchg_abort_by_lun = + v_lport->xchg_mgr_temp.pfn_unf_xchg_abort_by_lun; + if (unf_xchg_abort_by_lun) { + unf_xchg_abort_by_lun((void *)v_lport, (void *)v_rport, + v_lun_id, v_tm_xchg, + v_abort_all_lun_flag); + } +} + +void unf_cm_xchg_abort_by_session(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + void (*pfn_unf_xchg_abort_by_session)(void*, void*) = NULL; + + UNF_CHECK_VALID(0x853, UNF_TRUE, v_lport, return); + + pfn_unf_xchg_abort_by_session = + v_lport->xchg_mgr_temp.pfn_unf_xchg_abort_by_session; + if (pfn_unf_xchg_abort_by_session) { + pfn_unf_xchg_abort_by_session((void *)v_lport, + (void *)v_rport); + } +} + +void *unf_cm_get_free_xchg(void *v_lport, unsigned int v_xchg_type) +{ + struct unf_lport_s *lport = NULL; + struct unf_cm_xchg_mgr_template_s *xch_mgr_temp = NULL; + + UNF_CHECK_VALID(0x855, UNF_TRUE, unlikely(v_lport), return NULL); + + lport = (struct unf_lport_s *)v_lport; + xch_mgr_temp = &lport->xchg_mgr_temp; + + /* Find the corresponding Lport Xchg management template. 
*/ + UNF_CHECK_VALID(0x856, UNF_TRUE, + unlikely(xch_mgr_temp->pfn_unf_xchg_get_free_and_init), + return NULL); + + return xch_mgr_temp->pfn_unf_xchg_get_free_and_init(lport, v_xchg_type, + INVALID_VALUE16); +} + +void unf_cm_free_xchg(void *v_lport, void *v_xchg) +{ + struct unf_lport_s *lport = NULL; + struct unf_cm_xchg_mgr_template_s *xch_mgr_temp = NULL; + + UNF_CHECK_VALID(0x857, UNF_TRUE, unlikely(v_lport), return); + UNF_CHECK_VALID(0x858, UNF_TRUE, unlikely(v_xchg), return); + + lport = (struct unf_lport_s *)v_lport; + xch_mgr_temp = &lport->xchg_mgr_temp; + UNF_CHECK_VALID(0x859, UNF_TRUE, + unlikely(xch_mgr_temp->pfn_unf_xchg_release), + return); + + /* + * unf_cm_free_xchg --->>> unf_free_xchg + * --->>> unf_xchg_ref_dec --->>> unf_free_fcp_xchg + * --->>> unf_done_ini_xchg + */ + xch_mgr_temp->pfn_unf_xchg_release(v_lport, v_xchg); +} + +void *unf_cm_lookup_xchg_by_tag(void *v_lport, unsigned short v_hot_pool_tag) +{ + struct unf_lport_s *lport = NULL; + struct unf_cm_xchg_mgr_template_s *xch_mgr_temp = NULL; + + UNF_CHECK_VALID(0x860, UNF_TRUE, unlikely(v_lport), return NULL); + + /* Find the corresponding Lport Xchg management template */ + lport = (struct unf_lport_s *)v_lport; + xch_mgr_temp = &lport->xchg_mgr_temp; + + UNF_CHECK_VALID(0x861, UNF_TRUE, + unlikely(xch_mgr_temp->pfn_unf_look_up_xchg_by_tag), + return NULL); + + return xch_mgr_temp->pfn_unf_look_up_xchg_by_tag(v_lport, + v_hot_pool_tag); +} + +void *unf_cm_lookup_xchg_by_id(void *v_lport, unsigned short v_ox_id, + unsigned int v_oid) +{ + struct unf_lport_s *lport = NULL; + struct unf_cm_xchg_mgr_template_s *xch_mgr_temp = NULL; + + UNF_CHECK_VALID(0x862, UNF_TRUE, unlikely(v_lport), return NULL); + + lport = (struct unf_lport_s *)v_lport; + xch_mgr_temp = &lport->xchg_mgr_temp; + + /* Find the corresponding Lport Xchg management template */ + UNF_CHECK_VALID(0x863, UNF_TRUE, + unlikely(xch_mgr_temp->pfn_unf_look_up_xchg_by_id), + return NULL); + + return xch_mgr_temp->pfn_unf_look_up_xchg_by_id(v_lport, v_ox_id, + v_oid); +} + +struct unf_xchg_s *unf_cm_lookup_xchg_by_cmnd_sn( + void *v_lport, + unsigned long long v_command_sn, + unsigned int v_world_id) +{ + struct unf_lport_s *lport = NULL; + struct unf_cm_xchg_mgr_template_s *xch_mgr_temp = NULL; + struct unf_xchg_s *xchg = NULL; + + UNF_CHECK_VALID(0x864, UNF_TRUE, unlikely(v_lport), return NULL); + + lport = (struct unf_lport_s *)v_lport; + xch_mgr_temp = &lport->xchg_mgr_temp; + + UNF_CHECK_VALID( + 0x865, UNF_TRUE, + unlikely(xch_mgr_temp->pfn_unf_look_up_xchg_by_cmnd_sn), + return NULL); + + xchg = + (struct unf_xchg_s *)xch_mgr_temp->pfn_unf_look_up_xchg_by_cmnd_sn( + lport, v_command_sn, + v_world_id); + + return xchg; +} + +static void unf_free_all_rsp_pages(struct unf_xchg_mgr_s *v_xchg_mgr) +{ + unsigned int buff_index; + + UNF_CHECK_VALID(0x868, UNF_TRUE, v_xchg_mgr, return); + + if (v_xchg_mgr->rsp_buf_list.buflist) { + for (buff_index = 0; buff_index < + v_xchg_mgr->rsp_buf_list.buf_num; + buff_index++) { + if (v_xchg_mgr->rsp_buf_list.buflist[buff_index].vaddr) { + dma_free_coherent( + &v_xchg_mgr->hot_pool->lport->low_level_func.dev->dev, + v_xchg_mgr->rsp_buf_list.buf_size, + v_xchg_mgr->rsp_buf_list.buflist[buff_index].vaddr, + v_xchg_mgr->rsp_buf_list.buflist[buff_index].paddr); + v_xchg_mgr->rsp_buf_list.buflist[buff_index].vaddr = NULL; + } + } + + kfree(v_xchg_mgr->rsp_buf_list.buflist); + v_xchg_mgr->rsp_buf_list.buflist = NULL; + } +} + +static unsigned int unf_init_xchg(struct unf_lport_s *v_lport, + struct unf_xchg_mgr_s 
*v_xchg_mgr, + unsigned int v_xchg_sum, + unsigned int v_sfs_sum) +{ + struct unf_xchg_s *xchg_mem = NULL; + union unf_sfs_u *sfs_mm_start = NULL; + dma_addr_t sfs_dma_addr; + struct unf_xchg_s *xchg = NULL; + struct unf_xchg_free_pool_s *free_pool = NULL; + unsigned int rsp_iu_nums_per_page = 0; + unsigned int rsp_iu_size = 0; + unsigned long flags = 0; + unsigned int xchg_sum = 0; + unsigned int i = 0; + unsigned int rsp_iu_loop = 0; + unsigned int buf_num; + unsigned int buf_size; + unsigned int curbuf_idx = 0; + void *page_addr; + dma_addr_t phy_addr; + + UNF_CHECK_VALID(0x871, UNF_TRUE, v_sfs_sum <= v_xchg_sum, + return UNF_RETURN_ERROR); + + free_pool = &v_xchg_mgr->free_pool; + xchg_sum = v_xchg_sum; + xchg_mem = v_xchg_mgr->fcp_mm_start; + xchg = xchg_mem; + + sfs_mm_start = (union unf_sfs_u *)v_xchg_mgr->sfs_mm_start; + sfs_dma_addr = v_xchg_mgr->sfs_phy_addr; + /* 1. Allocate the SFS UNION memory to each SFS XCHG + * and mount the SFS XCHG to the corresponding FREE linked list + */ + free_pool->total_sfs_xchg = 0; + free_pool->sfs_xchg_sum = v_sfs_sum; + for (i = 0; i < v_sfs_sum; i++) { + INIT_LIST_HEAD(&xchg->list_xchg_entry); + INIT_LIST_HEAD(&xchg->list_esgls); + spin_lock_init(&xchg->xchg_state_lock); + sema_init(&xchg->task_sema, 0); + sema_init(&xchg->echo_info.echo_sync_sema, 0); + + spin_lock_irqsave(&free_pool->xchg_free_pool_lock, flags); + xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr = sfs_mm_start; + xchg->fcp_sfs_union.sfs_entry.sfs_buff_phy_addr = sfs_dma_addr; + xchg->fcp_sfs_union.sfs_entry.sfs_buff_len = + sizeof(*sfs_mm_start); + list_add_tail(&xchg->list_xchg_entry, + &free_pool->list_sfs_xchg_list); + free_pool->total_sfs_xchg++; + spin_unlock_irqrestore(&free_pool->xchg_free_pool_lock, flags); + sfs_mm_start++; + sfs_dma_addr = sfs_dma_addr + sizeof(union unf_sfs_u); + xchg++; + } + + /* + * 2. Allocate RSP IU memory for each IO XCHG and mount IO + * XCHG to the corresponding FREE linked list + * The memory size of each RSP IU is rsp_iu_size. + */ + rsp_iu_size = (UNF_FCPRSP_CTL_LEN + UNF_MAX_RSP_INFO_LEN + + UNF_SCSI_SENSE_DATA_LEN); + + buf_size = BUF_LIST_PAGE_SIZE; + if ((xchg_sum - v_sfs_sum) * rsp_iu_size < BUF_LIST_PAGE_SIZE) + buf_size = (xchg_sum - v_sfs_sum) * rsp_iu_size; + + rsp_iu_nums_per_page = buf_size / rsp_iu_size; + buf_num = (xchg_sum - v_sfs_sum) % rsp_iu_nums_per_page ? 
+ (xchg_sum - v_sfs_sum) / rsp_iu_nums_per_page + 1 : + (xchg_sum - v_sfs_sum) / rsp_iu_nums_per_page; + + v_xchg_mgr->rsp_buf_list.buflist = + (struct buff_list_s *)kmalloc( + buf_num * sizeof(struct buff_list_s), + GFP_KERNEL); + v_xchg_mgr->rsp_buf_list.buf_num = buf_num; + v_xchg_mgr->rsp_buf_list.buf_size = buf_size; + + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) buff num 0x%x buff size 0x%x", + v_lport->port_id, buf_num, + v_xchg_mgr->rsp_buf_list.buf_size); + + if (!v_xchg_mgr->rsp_buf_list.buflist) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Allocate BigSfs pool buf list failed out of memory"); + goto free_buff; + } + memset(v_xchg_mgr->rsp_buf_list.buflist, 0, + buf_num * sizeof(struct buff_list_s)); + + free_pool->total_fcp_xchg = 0; + for (i = 0, curbuf_idx = 0; curbuf_idx < buf_num; curbuf_idx++) { + page_addr = dma_alloc_coherent( + &v_lport->low_level_func.dev->dev, + v_xchg_mgr->rsp_buf_list.buf_size, + &phy_addr, GFP_KERNEL); + if (!page_addr) + goto free_buff; + + memset(page_addr, 0, v_xchg_mgr->rsp_buf_list.buf_size); + v_xchg_mgr->rsp_buf_list.buflist[curbuf_idx].vaddr = page_addr; + v_xchg_mgr->rsp_buf_list.buflist[curbuf_idx].paddr = phy_addr; + + for (rsp_iu_loop = 0; + (rsp_iu_loop < rsp_iu_nums_per_page && + i < xchg_sum - v_sfs_sum); rsp_iu_loop++) { + INIT_LIST_HEAD(&xchg->list_xchg_entry); + + INIT_LIST_HEAD(&xchg->list_esgls); + spin_lock_init(&xchg->xchg_state_lock); + sema_init(&xchg->task_sema, 0); + sema_init(&xchg->echo_info.echo_sync_sema, 0); + + /* alloc dma buffer for fcp_rsp_iu */ + spin_lock_irqsave(&free_pool->xchg_free_pool_lock, + flags); + xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu = + (struct unf_fcprsp_iu_s *)page_addr; + xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu_phy_addr = + phy_addr; + list_add_tail(&xchg->list_xchg_entry, + &free_pool->list_free_xchg_list); + free_pool->total_fcp_xchg++; + spin_unlock_irqrestore(&free_pool->xchg_free_pool_lock, + flags); + + page_addr += rsp_iu_size; + phy_addr += rsp_iu_size; + i++; + xchg++; + } + } + + free_pool->fcp_xchg_sum = free_pool->total_fcp_xchg; + + return RETURN_OK; +free_buff: + unf_free_all_rsp_pages(v_xchg_mgr); + return UNF_RETURN_ERROR; +} + +static unsigned int unf_get_xchg_config_sum(struct unf_lport_s *v_lport, + unsigned int *v_xchg_sum) +{ + struct unf_lport_cfg_item_s *lport_cfg_items = NULL; + + lport_cfg_items = &v_lport->low_level_func.lport_cfg_items; + + /* It has been checked at the bottom layer. + * Don't need to check it again. 
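The response-IU carving in unf_init_xchg() above packs buf_size / rsp_iu_size IUs into each DMA page (pages capped at BUF_LIST_PAGE_SIZE) and rounds the page count up with a hand-rolled ceiling division. The same arithmetic restated as a standalone helper; the helper name is hypothetical, and the kernel's DIV_ROUND_UP() macro computes the same value:

/* pages needed for n_io response IUs of iu_size bytes each */
static unsigned int rsp_page_count(unsigned int n_io,
				   unsigned int iu_size,
				   unsigned int page_size)
{
	unsigned int per_page = page_size / iu_size;	/* IUs per page */

	return (n_io % per_page) ? (n_io / per_page + 1)
				 : (n_io / per_page);
}

/* e.g. 1000 IUs of 120 bytes with 4096-byte pages:
 * 34 IUs fit per page, so 30 pages are allocated
 */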
+ */ + *v_xchg_sum = lport_cfg_items->max_sfs_xchg + lport_cfg_items->max_io; + if ((*v_xchg_sum / UNF_EXCHG_MGR_NUM) == 0 || + lport_cfg_items->max_sfs_xchg / UNF_EXCHG_MGR_NUM == 0) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "[err]Port(0x%x) Xchgsum(%u) or SfsXchg(%u) is less than ExchangeMgrNum(%u).", + v_lport->port_id, *v_xchg_sum, + lport_cfg_items->max_sfs_xchg, + UNF_EXCHG_MGR_NUM); + return UNF_RETURN_ERROR; + } + + if (*v_xchg_sum > (INVALID_VALUE16 - 1)) { + /* If the format of ox_id/rx_id is exceeded, + * this function is not supported + */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) Exchange num(0x%x) is Too Big.", + v_lport->port_id, *v_xchg_sum); + + return UNF_RETURN_ERROR; + } + + return RETURN_OK; +} + +static void unf_xchg_cancel_timer(void *v_xchg) +{ + struct unf_xchg_s *xchg = NULL; + int need_dec_xchg_ref = UNF_FALSE; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x874, UNF_TRUE, v_xchg, return); + xchg = (struct unf_xchg_s *)v_xchg; + + spin_lock_irqsave(&xchg->xchg_state_lock, flag); + if (cancel_delayed_work(&xchg->timeout_work)) + need_dec_xchg_ref = UNF_TRUE; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); + + if (need_dec_xchg_ref == UNF_TRUE) + unf_xchg_ref_dec(v_xchg, XCHG_CANCEL_TIMER); +} + +void unf_show_all_xchg(struct unf_lport_s *v_lport, + struct unf_xchg_mgr_s *v_xchg_mgr) +{ + struct unf_lport_s *lport = NULL; + struct unf_xchg_mgr_s *xchg_mgr = NULL; + struct unf_xchg_s *xchg = NULL; + struct list_head *xchg_node = NULL; + struct list_head *next_xchg_node = NULL; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x879, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x880, UNF_TRUE, v_xchg_mgr, return); + + UNF_REFERNCE_VAR(lport); + UNF_REFERNCE_VAR(xchg); + + xchg_mgr = v_xchg_mgr; + lport = v_lport; + + /* hot Xchg */ + spin_lock_irqsave(&xchg_mgr->hot_pool->xchg_hot_pool_lock, flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_WARN, + "INI busy :"); + list_for_each_safe(xchg_node, next_xchg_node, + &xchg_mgr->hot_pool->ini_busylist) { + xchg = list_entry(xchg_node, struct unf_xchg_s, + list_xchg_entry); + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR, + "0x%p---0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----%llu.", + xchg, + (unsigned int)xchg->hot_pool_tag, + (unsigned int)xchg->xchg_type, + (unsigned int)xchg->ox_id, + (unsigned int)xchg->rx_id, + (unsigned int)xchg->sid, + (unsigned int)xchg->did, + atomic_read(&xchg->ref_cnt), + (unsigned int)xchg->io_state, + xchg->alloc_jif); + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, + UNF_WARN, "SFS :"); + list_for_each_safe(xchg_node, next_xchg_node, + &xchg_mgr->hot_pool->sfs_busylist) { + xchg = list_entry(xchg_node, struct unf_xchg_s, + list_xchg_entry); + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_WARN, + "0x%p---0x%x---0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----%llu.", + xchg, + xchg->cmnd_code, + (unsigned int)xchg->hot_pool_tag, + (unsigned int)xchg->xchg_type, + (unsigned int)xchg->ox_id, + (unsigned int)xchg->rx_id, + (unsigned int)xchg->sid, + (unsigned int)xchg->did, + atomic_read(&xchg->ref_cnt), + (unsigned int)xchg->io_state, + xchg->alloc_jif); + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, UNF_WARN, + "Destroy list."); + list_for_each_safe(xchg_node, next_xchg_node, + &xchg_mgr->hot_pool->list_destroy_xchg) { + xchg = list_entry(xchg_node, struct unf_xchg_s, + list_xchg_entry); + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_NORMAL, 
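unf_xchg_cancel_timer() above relies on a standard kernel idiom: cancel_delayed_work() returns true only when the work item was still queued, i.e. at most once per armed timer, so the reference held on behalf of the timer is dropped exactly once. A condensed sketch of the idiom, assuming a hypothetical refcounted object:

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

struct timed_obj {
	struct delayed_work timeout_work;
	atomic_t ref_cnt;
	spinlock_t lock;
};

static void timed_obj_cancel_timer(struct timed_obj *o)
{
	bool drop = false;
	unsigned long flags;

	spin_lock_irqsave(&o->lock, flags);
	if (cancel_delayed_work(&o->timeout_work))
		drop = true;	/* we de-queued it: the timer won't run */
	spin_unlock_irqrestore(&o->lock, flags);

	if (drop)
		atomic_dec(&o->ref_cnt);	/* drop the timer's ref */
}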
UNF_WARN, + "0x%p---0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----%llu.", + xchg, + (unsigned int)xchg->hot_pool_tag, + (unsigned int)xchg->xchg_type, + (unsigned int)xchg->ox_id, + (unsigned int)xchg->rx_id, + (unsigned int)xchg->sid, + (unsigned int)xchg->did, + atomic_read(&xchg->ref_cnt), + (unsigned int)xchg->io_state, + xchg->alloc_jif); + } + spin_unlock_irqrestore(&xchg_mgr->hot_pool->xchg_hot_pool_lock, flags); + + UNF_REFERNCE_VAR(xchg); + UNF_REFERNCE_VAR(lport); +} + +static void unf_delay_work_del_syn(struct unf_xchg_s *v_xchg) +{ + struct unf_xchg_s *xchg = NULL; + + UNF_CHECK_VALID(0x884, UNF_TRUE, v_xchg, return); + + xchg = v_xchg; + + /* synchronous release timer */ + if (!cancel_delayed_work_sync(&xchg->timeout_work)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Exchange(0x%p), State(0x%x) can't delete work timer, timer is running or no timer.", + xchg, xchg->io_state); + } else { + /* The reference count cannot be directly subtracted. + * This prevents the XCHG from being moved to the + * Free linked list when the card is unloaded. + */ + unf_cm_free_xchg(xchg->lport, xchg); + } +} + +static void unf_free_lport_sfs_xchg(struct unf_xchg_mgr_s *v_xchg_mgr, + int v_done_ini_flag) +{ + struct list_head *list = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned long hot_pool_lock_flags = 0; + + UNF_REFERNCE_VAR(v_done_ini_flag); + UNF_CHECK_VALID(0x887, UNF_TRUE, v_xchg_mgr, return); + UNF_CHECK_VALID(0x888, UNF_TRUE, v_xchg_mgr->hot_pool, return); + + spin_lock_irqsave(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock, + hot_pool_lock_flags); + while (!list_empty(&v_xchg_mgr->hot_pool->sfs_busylist)) { + list = (&v_xchg_mgr->hot_pool->sfs_busylist)->next; + list_del_init(list); + + /* Prevent the xchg of the sfs from being accessed repeatedly. + * The xchg is first mounted to the destroy linked list. + */ + list_add_tail(list, &v_xchg_mgr->hot_pool->list_destroy_xchg); + + xchg = list_entry(list, struct unf_xchg_s, list_xchg_entry); + spin_unlock_irqrestore( + &v_xchg_mgr->hot_pool->xchg_hot_pool_lock, + hot_pool_lock_flags); + unf_delay_work_del_syn(xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Free SFS Exchange(0x%p), State(0x%x), Reference count(%d), Start time(%llu).", + xchg, xchg->io_state, atomic_read(&xchg->ref_cnt), + xchg->alloc_jif); + + unf_cm_free_xchg(xchg->lport, xchg); + + spin_lock_irqsave(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock, + hot_pool_lock_flags); + } + spin_unlock_irqrestore(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock, + hot_pool_lock_flags); +} + +static void unf_free_lport_destroy_xchg(struct unf_xchg_mgr_s *v_xchg_mgr) +{ +#define UNF_WAIT_DESTROY_EMPTY_STEP_MS 1000 +#define UNF_WAIT_IO_STATE_TGT_FRONT_MS (10 * 1000) + + struct unf_xchg_s *xchg = NULL; + struct list_head *next_xchg_node = NULL; + unsigned long hot_pool_lock_flags = 0; + unsigned long xchg_flag = 0; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_xchg_mgr, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, v_xchg_mgr->hot_pool, + return); + + /* In this case, the timer on the destroy linked list is deleted. + * You only need to check whether the timer is released + * at the end of the tgt. 
+ */
+	spin_lock_irqsave(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock,
+			  hot_pool_lock_flags);
+	while (!list_empty(&v_xchg_mgr->hot_pool->list_destroy_xchg)) {
+		next_xchg_node =
+			(&v_xchg_mgr->hot_pool->list_destroy_xchg)->next;
+		xchg = list_entry(next_xchg_node, struct unf_xchg_s,
+				  list_xchg_entry);
+
+		spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flag);
+
+		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
+			  "Free Exchange(0x%p), Type(0x%x), State(0x%x), Reference count(%d), Start time(%llu)",
+			  xchg, xchg->xchg_type, xchg->io_state,
+			  atomic_read(&xchg->ref_cnt),
+			  xchg->alloc_jif);
+
+		spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flag);
+		spin_unlock_irqrestore(
+			&v_xchg_mgr->hot_pool->xchg_hot_pool_lock,
+			hot_pool_lock_flags);
+
+		/* This call either cancels the timer successfully
+		 * or waits until the running timer handler completes
+		 */
+		unf_delay_work_del_syn(xchg);
+
+		/*
+		 * If the timer was canceled successfully, free the Xchg here.
+		 * If the timer already fired, the Xchg may have been released
+		 * by the handler; in that case this free simply has no effect.
+		 */
+		unf_cm_free_xchg(xchg->lport, xchg);
+
+		spin_lock_irqsave(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock,
+				  hot_pool_lock_flags);
+	}
+
+	spin_unlock_irqrestore(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock,
+			       hot_pool_lock_flags);
+}
+
+static unsigned int unf_free_lport_xchg(struct unf_lport_s *v_lport,
+					struct unf_xchg_mgr_s *v_xchg_mgr)
+{
+#define UNF_OS_WAITIO_TIMEOUT (10 * 1000)
+
+	unsigned long free_pool_lock_flags = 0;
+	int wait = UNF_FALSE;
+	unsigned int total_xchg = 0;
+	unsigned int total_xchg_sum = 0;
+	unsigned int ret = RETURN_OK;
+	unsigned long long timeout = 0;
+
+	struct completion xchg_mgr_completion =
+		COMPLETION_INITIALIZER(xchg_mgr_completion);
+
+	UNF_CHECK_VALID(0x881, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
+	UNF_CHECK_VALID(0x882, UNF_TRUE, v_xchg_mgr, return UNF_RETURN_ERROR);
+	UNF_CHECK_VALID(0x883, UNF_TRUE, v_xchg_mgr->hot_pool,
+			return UNF_RETURN_ERROR);
+	UNF_REFERNCE_VAR(v_lport);
+
+	unf_free_lport_sfs_xchg(v_xchg_mgr, UNF_FALSE);
+
+	/* free INI mode exchanges belonging to this L_Port */
+	unf_free_lport_ini_xchg(v_xchg_mgr, UNF_FALSE);
+
+	spin_lock_irqsave(&v_xchg_mgr->free_pool.xchg_free_pool_lock,
+			  free_pool_lock_flags);
+	total_xchg = v_xchg_mgr->free_pool.total_fcp_xchg +
+		v_xchg_mgr->free_pool.total_sfs_xchg;
+	total_xchg_sum = v_xchg_mgr->free_pool.fcp_xchg_sum +
+		v_xchg_mgr->free_pool.sfs_xchg_sum;
+	if (total_xchg != total_xchg_sum) {
+		v_xchg_mgr->free_pool.xchg_mgr_completion =
+			&xchg_mgr_completion;
+		wait = UNF_TRUE;
+	}
+	spin_unlock_irqrestore(&v_xchg_mgr->free_pool.xchg_free_pool_lock,
+			       free_pool_lock_flags);
+
+	if (wait == UNF_TRUE) {
+		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
+			  "[info]Port(0x%x) begin to wait for exchange manager completion(%ld) (0x%x:0x%x)",
+			  v_lport->port_id, jiffies, total_xchg,
+			  total_xchg_sum);
+
+		unf_show_all_xchg(v_lport, v_xchg_mgr);
+
+		timeout = wait_for_completion_timeout(
+			v_xchg_mgr->free_pool.xchg_mgr_completion,
+			msecs_to_jiffies(UNF_OS_WAITIO_TIMEOUT));
+		if (timeout == 0)
+			unf_free_lport_destroy_xchg(v_xchg_mgr);
+
+		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
+			  "[info]Port(0x%x) wait for exchange manager completion end",
+			  v_lport->port_id);
+
+		spin_lock_irqsave(&v_xchg_mgr->free_pool.xchg_free_pool_lock,
+				  free_pool_lock_flags);
+		v_xchg_mgr->free_pool.xchg_mgr_completion = NULL;
+		spin_unlock_irqrestore(
+			&v_xchg_mgr->free_pool.xchg_free_pool_lock,
+			free_pool_lock_flags);
+	}
+
+	return ret;
+}
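unf_free_lport_xchg() above drains in-flight exchanges with an on-stack struct completion: when the free-pool counters show exchanges still outstanding, it publishes the completion pointer under the pool lock, waits with a timeout, and unpublishes afterwards; the release side (unf_check_xchg_mgr_status(), later in this file) fires the completion once the counters match. Both halves condensed into a sketch around a hypothetical pool type:

#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

struct drain_pool {
	spinlock_t lock;
	unsigned int in_use;
	struct completion *drained;	/* non-NULL only while waiting */
};

static void drain_pool_wait(struct drain_pool *p, unsigned int ms)
{
	DECLARE_COMPLETION_ONSTACK(done);
	bool wait = false;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	if (p->in_use) {		/* objects still outstanding */
		p->drained = &done;
		wait = true;
	}
	spin_unlock_irqrestore(&p->lock, flags);

	if (!wait)
		return;

	wait_for_completion_timeout(&done, msecs_to_jiffies(ms));

	spin_lock_irqsave(&p->lock, flags);
	p->drained = NULL;		/* unpublish before stack unwinds */
	spin_unlock_irqrestore(&p->lock, flags);
}

/* release side: runs for every object returned to the pool */
static void drain_pool_put(struct drain_pool *p)
{
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	if (--p->in_use == 0 && p->drained)
		complete(p->drained);
	spin_unlock_irqrestore(&p->lock, flags);
}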
+void unf_free_lport_all_xchg(struct unf_lport_s *v_lport)
+{
+	struct unf_xchg_mgr_s *xchg_mgr;
+	unsigned int i;
+
+	UNF_CHECK_VALID(0x881, UNF_TRUE, v_lport, return);
+	UNF_REFERNCE_VAR(v_lport);
+
+	for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
+		xchg_mgr = unf_get_xchg_mgr_by_lport(v_lport, i);
+		if (unlikely(!xchg_mgr)) {
+			UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
+				  "[err]Port(0x%x) hot pool is NULL",
+				  v_lport->port_id);
+
+			continue;
+		}
+		unf_free_lport_sfs_xchg(xchg_mgr, UNF_FALSE);
+
+		/* free INI mode exchanges belonging to this L_Port */
+		unf_free_lport_ini_xchg(xchg_mgr, UNF_FALSE);
+
+		unf_free_lport_destroy_xchg(xchg_mgr);
+	}
+}
+
+void unf_free_lport_ini_xchg(struct unf_xchg_mgr_s *v_xchg_mgr,
+			     int v_done_ini_flag)
+{
+	/*
+	 * Called for:
+	 * 1. L_Port destroy
+	 * 2. AC power down
+	 */
+	struct list_head *list = NULL;
+	struct unf_xchg_s *xchg = NULL;
+	unsigned long hot_pool_lock_flags = 0;
+	unsigned int up_status = 0;
+
+	UNF_REFERNCE_VAR(v_done_ini_flag);
+	UNF_CHECK_VALID(0x889, UNF_TRUE, v_xchg_mgr, return);
+	UNF_CHECK_VALID(0x890, UNF_TRUE, v_xchg_mgr->hot_pool, return);
+
+	spin_lock_irqsave(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock,
+			  hot_pool_lock_flags);
+	while (!list_empty(&v_xchg_mgr->hot_pool->ini_busylist)) {
+		/* for each INI busy_list (exchange) node */
+		list = (&v_xchg_mgr->hot_pool->ini_busylist)->next;
+
+		/* Move the exchange node to destroy_list so that done
+		 * cannot run repeatedly
+		 */
+		list_del_init(list);
+		list_add_tail(list, &v_xchg_mgr->hot_pool->list_destroy_xchg);
+		xchg = list_entry(list, struct unf_xchg_s, list_xchg_entry);
+		if (atomic_read(&xchg->ref_cnt) <= 0)
+			continue;
+		spin_unlock_irqrestore(
+			&v_xchg_mgr->hot_pool->xchg_hot_pool_lock,
+			hot_pool_lock_flags);
+		unf_delay_work_del_syn(xchg);
+
+		/* When INI done is forced here, the command result must be
+		 * set to failure so that the upper layer does not see a
+		 * spurious OK and end up with inconsistent data
+		 */
+		up_status = unf_get_uplevel_cmnd_errcode(
+			xchg->scsi_cmnd_info.err_code_table,
+			xchg->scsi_cmnd_info.err_code_table_cout,
+			UNF_IO_PORT_LOGOUT);
+
+		if (xchg->io_state & INI_IO_STATE_UPABORT) {
+			/*
+			 * About L_Port destroy or AC power down:
+			 * UP_ABORT ---to--->>> ABORT_Port_Removing
+			 */
+			up_status = UNF_IO_ABORT_PORT_REMOVING;
+		}
+
+		xchg->scsi_cmnd_info.result = up_status;
+		up(&xchg->task_sema);
+
+		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
+			  "[info]Free INI exchange(0x%p) state(0x%x) reference count(%d) start time(%llu)",
+			  xchg, xchg->io_state, atomic_read(&xchg->ref_cnt),
+			  xchg->alloc_jif);
+
+		unf_cm_free_xchg(xchg->lport, xchg);
+
+		/* go to next INI busy_list (exchange) node */
+		spin_lock_irqsave(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock,
+				  hot_pool_lock_flags);
+	}
+	spin_unlock_irqrestore(&v_xchg_mgr->hot_pool->xchg_hot_pool_lock,
+			       hot_pool_lock_flags);
+}
+
+static void unf_free_all_big_sfs(struct unf_xchg_mgr_s *v_xchg_mgr)
+{
+	struct unf_xchg_mgr_s *xchg_mgr = v_xchg_mgr;
+	struct unf_big_sfs_s *big_sfs = NULL;
+	struct list_head *node = NULL;
+	struct list_head *next_node = NULL;
+	unsigned long flag = 0;
+	unsigned int buff_index;
+
+	UNF_CHECK_VALID(0x891, UNF_TRUE, xchg_mgr, return);
+
+	/* Return buffers still on the busy list to the free list */
+	spin_lock_irqsave(&xchg_mgr->st_big_sfs_pool.big_sfs_pool_lock, flag);
+	list_for_each_safe(node, next_node,
+			   &xchg_mgr->st_big_sfs_pool.list_busy_pool) {
+		list_del(node);
+		list_add_tail(node, &xchg_mgr->st_big_sfs_pool.list_free_pool);
+	}
+
+	
list_for_each_safe(node, next_node, + &xchg_mgr->st_big_sfs_pool.list_free_pool) { + list_del(node); + big_sfs = list_entry(node, struct unf_big_sfs_s, + entry_big_sfs); + if (big_sfs->vaddr) + big_sfs->vaddr = NULL; + } + spin_unlock_irqrestore(&xchg_mgr->st_big_sfs_pool.big_sfs_pool_lock, + flag); + + if (xchg_mgr->big_sfs_buf_list.buflist) { + for (buff_index = 0; + buff_index < xchg_mgr->big_sfs_buf_list.buf_num; + buff_index++) { + if (xchg_mgr->big_sfs_buf_list.buflist[buff_index].vaddr) { + kfree(xchg_mgr->big_sfs_buf_list.buflist[buff_index].vaddr); + xchg_mgr->big_sfs_buf_list.buflist[buff_index].vaddr = NULL; + } + } + + kfree(xchg_mgr->big_sfs_buf_list.buflist); + xchg_mgr->big_sfs_buf_list.buflist = NULL; + } +} + +static void unf_free_big_sfs_pool(struct unf_xchg_mgr_s *v_xchg_mgr) +{ + UNF_CHECK_VALID(0x892, UNF_TRUE, v_xchg_mgr, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "Free Big SFS Pool, Count(0x%x).", + v_xchg_mgr->st_big_sfs_pool.free_count); + + unf_free_all_big_sfs(v_xchg_mgr); + v_xchg_mgr->st_big_sfs_pool.free_count = 0; + + if (v_xchg_mgr->st_big_sfs_pool.big_sfs_pool) { + vfree(v_xchg_mgr->st_big_sfs_pool.big_sfs_pool); + v_xchg_mgr->st_big_sfs_pool.big_sfs_pool = NULL; + } +} + +static void unf_free_xchg_mgr_mem(struct unf_lport_s *v_lport, + struct unf_xchg_mgr_s *v_xchg_mgr) +{ + struct unf_xchg_mgr_s *xchg_mgr = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int i = 0; + unsigned int xchg_sum = 0; + struct unf_xchg_free_pool_s *free_pool = NULL; + + UNF_CHECK_VALID(0x893, UNF_TRUE, v_xchg_mgr, return); + + xchg_mgr = v_xchg_mgr; + + /* Release the reserved Rsp IU Page */ + unf_free_all_rsp_pages(xchg_mgr); + + unf_free_big_sfs_pool(xchg_mgr); + + /* The sfs is released first, and the XchgMgr is allocated + * by the get free page. + * Therefore, the XchgMgr is compared with the '0' + */ + if (xchg_mgr->sfs_mm_start != 0) { + dma_free_coherent(&v_lport->low_level_func.dev->dev, + xchg_mgr->sfs_mem_size, + xchg_mgr->sfs_mm_start, + xchg_mgr->sfs_phy_addr); + xchg_mgr->sfs_mm_start = 0; + } + + /* Release Xchg first */ + if (xchg_mgr->fcp_mm_start) { + unf_get_xchg_config_sum(v_lport, &xchg_sum); + xchg_sum = xchg_sum / UNF_EXCHG_MGR_NUM; + + xchg = xchg_mgr->fcp_mm_start; + for (i = 0; i < xchg_sum; i++) { + if (!xchg) + break; + xchg++; + } + + vfree(xchg_mgr->fcp_mm_start); + xchg_mgr->fcp_mm_start = NULL; + } + + /* release the hot pool */ + if (xchg_mgr->hot_pool) { + vfree(xchg_mgr->hot_pool); + xchg_mgr->hot_pool = NULL; + } + + free_pool = &xchg_mgr->free_pool; + + vfree(xchg_mgr); + + UNF_REFERNCE_VAR(xchg_mgr); + UNF_REFERNCE_VAR(free_pool); +} + +static void unf_free_xchg_mgr(struct unf_lport_s *v_lport, + struct unf_xchg_mgr_s *v_xchg_mgr) +{ + unsigned long flags = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x894, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x895, UNF_TRUE, v_xchg_mgr, return); + + /* 1. At first, free exchanges for this Exch_Mgr */ + ret = unf_free_lport_xchg(v_lport, v_xchg_mgr); + + /* 2. Delete this Exch_Mgr entry */ + spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags); + list_del_init(&v_xchg_mgr->xchg_mgr_entry); + spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags); + + /* 3. 
free Exch_Mgr memory if necessary */ + if (ret == RETURN_OK) { + /* free memory directly */ + unf_free_xchg_mgr_mem(v_lport, v_xchg_mgr); + } else { + /* Add it to Dirty list */ + spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags); + list_add_tail(&v_xchg_mgr->xchg_mgr_entry, + &v_lport->list_dirty_xchg_mgr_head); + spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags); + + /* Mark dirty flag */ + unf_cmmark_dirty_mem(v_lport, + UNF_LPORT_DIRTY_FLAG_XCHGMGR_DIRTY); + } +} + +void unf_free_all_xchg_mgr(struct unf_lport_s *v_lport) +{ + struct unf_xchg_mgr_s *xchg_mgr = NULL; + unsigned long flags = 0; + unsigned int i = 0; + + UNF_CHECK_VALID(0x896, UNF_TRUE, v_lport, return); + + /* for each L_Port->Exch_Mgr_List */ + spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags); + while (!list_empty(&v_lport->list_xchg_mgr_head)) { + spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags); + + xchg_mgr = unf_get_xchg_mgr_by_lport(v_lport, i); + unf_free_xchg_mgr(v_lport, xchg_mgr); + if (i < UNF_EXCHG_MGR_NUM) + v_lport->p_xchg_mgr[i] = NULL; + + i++; + /* go to next */ + spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags); + } + spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags); + + v_lport->destroy_step = UNF_LPORT_DESTROY_STEP_4_DESTROY_EXCH_MGR; +} + +static unsigned int unf_init_xchg_mgr(struct unf_xchg_mgr_s *v_xchg_mgr) +{ + struct unf_xchg_mgr_s *xchg_mgr = NULL; + + UNF_CHECK_VALID(0x897, UNF_TRUE, v_xchg_mgr, return UNF_RETURN_ERROR); + xchg_mgr = v_xchg_mgr; + memset(xchg_mgr, 0, sizeof(struct unf_xchg_mgr_s)); + + INIT_LIST_HEAD(&xchg_mgr->xchg_mgr_entry); + xchg_mgr->mgr_type = UNF_XCHG_MGR_FC; + xchg_mgr->min_xid = UNF_XCHG_MIN_XID; + xchg_mgr->max_xid = UNF_XCHG_MAX_XID; + xchg_mgr->fcp_mm_start = NULL; + xchg_mgr->mem_size = sizeof(struct unf_xchg_mgr_s); + return RETURN_OK; +} + +static unsigned int unf_init_xchg_mgr_free_pool( + struct unf_xchg_mgr_s *v_xchg_mgr) +{ + struct unf_xchg_free_pool_s *free_pool = NULL; + struct unf_xchg_mgr_s *xchg_mgr = NULL; + + UNF_CHECK_VALID(0x898, UNF_TRUE, v_xchg_mgr, return UNF_RETURN_ERROR); + xchg_mgr = v_xchg_mgr; + + free_pool = &xchg_mgr->free_pool; + INIT_LIST_HEAD(&free_pool->list_free_xchg_list); + INIT_LIST_HEAD(&free_pool->list_sfs_xchg_list); + spin_lock_init(&free_pool->xchg_free_pool_lock); + free_pool->fcp_xchg_sum = 0; + free_pool->xchg_mgr_completion = NULL; + + return RETURN_OK; +} + +static unsigned int unf_init_xchg_hot_pool( + struct unf_lport_s *v_lport, + struct unf_xchg_hot_pool_s *v_hot_pool, + unsigned int v_xchg_sum) +{ + struct unf_xchg_hot_pool_s *hot_pool = NULL; + + UNF_CHECK_VALID(0x899, UNF_TRUE, v_hot_pool, return UNF_RETURN_ERROR); + hot_pool = v_hot_pool; + + INIT_LIST_HEAD(&hot_pool->sfs_busylist); + INIT_LIST_HEAD(&hot_pool->ini_busylist); + spin_lock_init(&hot_pool->xchg_hot_pool_lock); + INIT_LIST_HEAD(&hot_pool->list_destroy_xchg); + hot_pool->total_xchges = 0; + hot_pool->total_res_cnt = 0; + hot_pool->wait_state = UNF_FALSE; + hot_pool->lport = v_lport; + + /* Slab Pool Index */ + hot_pool->slab_next_index = 0; + UNF_TOU16_CHECK(hot_pool->slab_total_sum, v_xchg_sum, + return UNF_RETURN_ERROR); + + return RETURN_OK; +} + +static unsigned int unf_alloc_and_init_big_sfs_pool( + struct unf_lport_s *v_lport, + struct unf_xchg_mgr_s *v_xchg_mgr) +{ + unsigned int i = 0; + unsigned int size = 0; + unsigned int align_size = 0; + unsigned int npiv_cnt = 0; + struct unf_big_sfs_pool_s *big_sfs_pool = NULL; + struct unf_big_sfs_s *big_sfs_buf = NULL; + unsigned int buf_total_size; + unsigned int buf_num; + 
unsigned int buf_cnt_perhugebuf; + unsigned int alloc_idx; + unsigned int curbuf_idx = 0; + unsigned int curbuf_offset = 0; + + UNF_CHECK_VALID(0x900, UNF_TRUE, v_xchg_mgr, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x901, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + big_sfs_pool = &v_xchg_mgr->st_big_sfs_pool; + + INIT_LIST_HEAD(&big_sfs_pool->list_free_pool); + INIT_LIST_HEAD(&big_sfs_pool->list_busy_pool); + spin_lock_init(&big_sfs_pool->big_sfs_pool_lock); + npiv_cnt = v_lport->low_level_func.support_max_npiv_num; + + /* + * The value*6 indicates GID_PT/GID_FT, RSCN, and ECHO + * Another command is received when a command is being responded + * A maximum of 20 resources are reserved for the RSCN. + * During the test, multiple rscn are found. As a result, + * the resources are insufficient and the disc fails. + */ + big_sfs_pool->free_count = (npiv_cnt + 1) * 6 + 20; + big_sfs_buf = (struct unf_big_sfs_s *)vmalloc( + big_sfs_pool->free_count + * sizeof(struct unf_big_sfs_s)); + if (!big_sfs_buf) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_REG_ATT, UNF_ERR, + "Allocate Big SFS buf fail."); + + return UNF_RETURN_ERROR; + } + memset(big_sfs_buf, 0, big_sfs_pool->free_count * + sizeof(struct unf_big_sfs_s)); + v_xchg_mgr->mem_size += + (unsigned int) + (big_sfs_pool->free_count * sizeof(struct unf_big_sfs_s)); + big_sfs_pool->big_sfs_pool = (void *)big_sfs_buf; + + /* + * Use the larger value of sizeof (struct unf_gif_acc_pld_s) and + * sizeof (struct unf_rscn_pld_s) to avoid the icp error.Therefore, + * the value is directly assigned instead of being compared. + */ + size = sizeof(struct unf_gif_acc_pld_s); + align_size = ALIGN(size, PAGE_SIZE); + + buf_total_size = align_size * big_sfs_pool->free_count; + + v_xchg_mgr->big_sfs_buf_list.buf_size = + buf_total_size > BUF_LIST_PAGE_SIZE ? + BUF_LIST_PAGE_SIZE : buf_total_size; + buf_cnt_perhugebuf = + v_xchg_mgr->big_sfs_buf_list.buf_size / align_size; + buf_num = + big_sfs_pool->free_count % buf_cnt_perhugebuf ? 
+ big_sfs_pool->free_count / buf_cnt_perhugebuf + 1 : + big_sfs_pool->free_count / buf_cnt_perhugebuf; + + v_xchg_mgr->big_sfs_buf_list.buflist = + (struct buff_list_s *)kmalloc( + buf_num * sizeof(struct buff_list_s), + GFP_KERNEL); + v_xchg_mgr->big_sfs_buf_list.buf_num = buf_num; + + if (!v_xchg_mgr->big_sfs_buf_list.buflist) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[err]Allocate BigSfs pool buf list failed out of memory"); + goto free_buff; + } + memset(v_xchg_mgr->big_sfs_buf_list.buflist, 0, buf_num * + sizeof(struct buff_list_s)); + for (alloc_idx = 0; alloc_idx < buf_num; alloc_idx++) { + v_xchg_mgr->big_sfs_buf_list.buflist[alloc_idx].vaddr = + kmalloc(v_xchg_mgr->big_sfs_buf_list.buf_size, + GFP_ATOMIC); + if (!v_xchg_mgr->big_sfs_buf_list.buflist[alloc_idx].vaddr) + goto free_buff; + + memset(v_xchg_mgr->big_sfs_buf_list.buflist[alloc_idx].vaddr, + 0, v_xchg_mgr->big_sfs_buf_list.buf_size); + } + + for (i = 0; i < big_sfs_pool->free_count; i++) { + if ((i != 0) && !(i % buf_cnt_perhugebuf)) + curbuf_idx++; + + curbuf_offset = align_size * (i % buf_cnt_perhugebuf); + big_sfs_buf->vaddr = + v_xchg_mgr->big_sfs_buf_list.buflist[curbuf_idx].vaddr + + curbuf_offset; + big_sfs_buf->size = size; + v_xchg_mgr->mem_size += size; + list_add_tail(&big_sfs_buf->entry_big_sfs, + &big_sfs_pool->list_free_pool); + big_sfs_buf++; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[EVENT]Allocate BigSfs pool size:%d,uiAlignSize:%d,buf_num:%d,buf_size:%d", + size, align_size, v_xchg_mgr->big_sfs_buf_list.buf_num, + v_xchg_mgr->big_sfs_buf_list.buf_size); + return RETURN_OK; +free_buff: + unf_free_all_big_sfs(v_xchg_mgr); + vfree(big_sfs_buf); + big_sfs_pool->big_sfs_pool = NULL; + return UNF_RETURN_ERROR; +} + +/* + * Function Name : unf_free_one_big_sfs + * Function Description: Put the big sfs memory in xchg back to bigsfspool + * Input Parameters : struct unf_xchg_s * v_xchg + * Output Parameters : N/A + * Return Type : static void + */ +static void unf_free_one_big_sfs(struct unf_xchg_s *v_xchg) +{ + unsigned long flag = 0; + struct unf_xchg_mgr_s *xchg_mgr = NULL; + + UNF_CHECK_VALID(0x902, UNF_TRUE, v_xchg, return); + xchg_mgr = v_xchg->xchg_mgr; + UNF_CHECK_VALID(0x903, UNF_TRUE, xchg_mgr, return); + if (!v_xchg->big_sfs_buf) + return; + + if ((v_xchg->cmnd_code != NS_GID_PT) && + (v_xchg->cmnd_code != NS_GID_FT) && + (v_xchg->cmnd_code != ELS_ECHO) && + (UNF_SET_ELS_ACC_TYPE(ELS_ECHO) != v_xchg->cmnd_code) && + (v_xchg->cmnd_code != ELS_RSCN) && + (UNF_SET_ELS_ACC_TYPE(ELS_RSCN) != v_xchg->cmnd_code)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "Exchange(0x%p), Command(0x%x) big SFS buf is not NULL.", + v_xchg, v_xchg->cmnd_code); + } + + spin_lock_irqsave(&xchg_mgr->st_big_sfs_pool.big_sfs_pool_lock, flag); + list_del(&v_xchg->big_sfs_buf->entry_big_sfs); + list_add_tail(&v_xchg->big_sfs_buf->entry_big_sfs, + &xchg_mgr->st_big_sfs_pool.list_free_pool); + xchg_mgr->st_big_sfs_pool.free_count++; + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "Free one big SFS buf(0x%p), Count(0x%x), Exchange(0x%p), Command(0x%x).", + v_xchg->big_sfs_buf->vaddr, + xchg_mgr->st_big_sfs_pool.free_count, + v_xchg, v_xchg->cmnd_code); + spin_unlock_irqrestore(&xchg_mgr->st_big_sfs_pool.big_sfs_pool_lock, + flag); +} + +static void unf_free_exchg_mgr_info(struct unf_lport_s *v_lport) +{ + unsigned int i; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + unsigned long flags = 0; + struct 
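The pool sizing in unf_alloc_and_init_big_sfs_pool() above reserves (npiv_cnt + 1) * 6 + 20 buffers (six frame types per port plus spare capacity for RSCN bursts), rounds each payload up to a page, and carves the buffers out of chunks of at most BUF_LIST_PAGE_SIZE. The arithmetic restated as a standalone helper; names are hypothetical, and page_size is assumed to be a power of two:

static void big_sfs_sizing(unsigned int npiv_cnt, unsigned int payload,
			   unsigned int page_size, unsigned int chunk_max,
			   unsigned int *count, unsigned int *chunks)
{
	/* round payload up to a page, as ALIGN(size, PAGE_SIZE) does */
	unsigned int aligned = (payload + page_size - 1) & ~(page_size - 1);
	unsigned int per_chunk = chunk_max / aligned;

	*count = (npiv_cnt + 1) * 6 + 20;	/* reserved buffers */

	/* ceiling division: chunks needed to hold *count buffers */
	*chunks = (*count % per_chunk) ? (*count / per_chunk + 1)
				       : (*count / per_chunk);
}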
unf_xchg_mgr_s *xchg_mgr = NULL; + + spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags); + list_for_each_safe(node, next_node, &v_lport->list_xchg_mgr_head) { + list_del(node); + xchg_mgr = list_entry(node, struct unf_xchg_mgr_s, + xchg_mgr_entry); + } + spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags); + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + xchg_mgr = v_lport->p_xchg_mgr[i]; + + if (xchg_mgr) { + unf_free_big_sfs_pool(xchg_mgr); + unf_free_all_rsp_pages(xchg_mgr); + + if (xchg_mgr->sfs_mm_start) { + dma_free_coherent( + &v_lport->low_level_func.dev->dev, + xchg_mgr->sfs_mem_size, + xchg_mgr->sfs_mm_start, + xchg_mgr->sfs_phy_addr); + xchg_mgr->sfs_mm_start = 0; + } + + if (xchg_mgr->fcp_mm_start) { + vfree(xchg_mgr->fcp_mm_start); + xchg_mgr->fcp_mm_start = NULL; + } + + if (xchg_mgr->hot_pool) { + vfree(xchg_mgr->hot_pool); + xchg_mgr->hot_pool = NULL; + } + + vfree(xchg_mgr); + v_lport->p_xchg_mgr[i] = NULL; + } + } +} + +static unsigned int unf_alloc_and_init_xchg_mgr(struct unf_lport_s *v_lport) +{ + struct unf_xchg_mgr_s *xchg_mgr = NULL; + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct unf_xchg_s *xchg_mem = NULL; + void *sfs_mm_start = 0; + dma_addr_t sfs_phy_addr = 0; + unsigned int xchg_sum = 0; + unsigned int sfs_xchg_sum = 0; + unsigned long flags = 0; + unsigned int order = 0; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int slab_num = 0; + unsigned int i = 0; + + UNF_REFERNCE_VAR(order); + /* SFS_EXCH + I/O_EXCH */ + ret = unf_get_xchg_config_sum(v_lport, &xchg_sum); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "Port(0x%x) can't get Exchange.", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + /* SFS Exchange Sum */ + sfs_xchg_sum = v_lport->low_level_func.lport_cfg_items.max_sfs_xchg / + UNF_EXCHG_MGR_NUM; + + xchg_sum = xchg_sum / UNF_EXCHG_MGR_NUM; + slab_num = v_lport->low_level_func.support_max_xid_range / + UNF_EXCHG_MGR_NUM; + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + /* Alloc Exchange Manager */ + xchg_mgr = (struct unf_xchg_mgr_s *) + vmalloc(sizeof(struct unf_xchg_mgr_s)); + if (!xchg_mgr) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) allocate Exchange Manager Memory Fail.", + v_lport->port_id); + + goto exit; + } + + /* Init Exchange Manager */ + ret = unf_init_xchg_mgr(xchg_mgr); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_REG_ATT, UNF_MAJOR, + "Port(0x%x) initialization Exchange Manager unsuccessful.", + v_lport->port_id); + + goto free_xchg_mgr; + } + + /* Initialize the Exchange Free Pool resource */ + ret = unf_init_xchg_mgr_free_pool(xchg_mgr); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_REG_ATT, UNF_MAJOR, + "Port(0x%x) initialization Exchange Manager Free Pool unsuccessful.", + v_lport->port_id); + + goto free_xchg_mgr; + } + + /* Allocate memory for Hot Pool and Xchg slab */ + hot_pool = vmalloc(sizeof(struct unf_xchg_hot_pool_s) + + sizeof(struct unf_xchg_s *) * slab_num); + if (!hot_pool) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) allocate Hot Pool Memory Fail.", + v_lport->port_id); + goto free_xchg_mgr; + } + + memset(hot_pool, 0, + sizeof(struct unf_xchg_hot_pool_s) + + sizeof(struct unf_xchg_s *) * slab_num); + xchg_mgr->mem_size += + (unsigned int)(sizeof(struct unf_xchg_hot_pool_s) + + sizeof(struct unf_xchg_s *) * slab_num); + + /* Initialize the Exchange Hot Pool resource */ + ret = unf_init_xchg_hot_pool(v_lport, hot_pool, slab_num); + if 
(ret != RETURN_OK) + goto free_hot_pool; + + hot_pool->base += (unsigned short)(i * slab_num); + /* Allocate the memory of all Xchg (IO/SFS) */ + xchg_mem = vmalloc(sizeof(struct unf_xchg_s) * xchg_sum); + if (!xchg_mem) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) allocate Exchange Memory Fail.", + v_lport->port_id); + goto free_hot_pool; + } + memset(xchg_mem, 0, sizeof(struct unf_xchg_s) * xchg_sum); + xchg_mgr->mem_size += + (unsigned int)(sizeof(struct unf_xchg_s) * xchg_sum); + + xchg_mgr->hot_pool = hot_pool; + xchg_mgr->fcp_mm_start = xchg_mem; + + /* Allocate the memory used by the SFS Xchg + * to carry the ELS/BLS/GS command and response + */ + xchg_mgr->sfs_mem_size = + (unsigned int)(sizeof(union unf_sfs_u) * sfs_xchg_sum); + + /* Apply for the DMA space for sending sfs frames. + * If the value of DMA32 is less than 4 GB, + * cross-4G problems will not occur + */ + order = (unsigned int)get_order(xchg_mgr->sfs_mem_size); + + sfs_mm_start = dma_alloc_coherent( + &v_lport->low_level_func.dev->dev, + xchg_mgr->sfs_mem_size, + &sfs_phy_addr, GFP_KERNEL); + if (!sfs_mm_start) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) Get Free Pagers Fail, Order(%u).", + v_lport->port_id, order); + goto free_xchg_mem; + } + memset(sfs_mm_start, 0, sizeof(union unf_sfs_u) * sfs_xchg_sum); + xchg_mgr->mem_size += xchg_mgr->sfs_mem_size; + xchg_mgr->sfs_mm_start = sfs_mm_start; + xchg_mgr->sfs_phy_addr = sfs_phy_addr; + + /* The Xchg is initialized and mounted to the Free Pool */ + ret = unf_init_xchg(v_lport, xchg_mgr, xchg_sum, sfs_xchg_sum); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_REG_ATT, UNF_MAJOR, + "Port(0x%x) initialization Exchange unsuccessful, Exchange Number(%u), SFS Exchange number(%u).", + v_lport->port_id, xchg_sum, sfs_xchg_sum); + dma_free_coherent(&v_lport->low_level_func.dev->dev, + xchg_mgr->sfs_mem_size, + xchg_mgr->sfs_mm_start, + xchg_mgr->sfs_phy_addr); + xchg_mgr->sfs_mm_start = 0; + goto free_xchg_mem; + } + + /* Apply for the memory used by GID_PT, GID_FT, and RSCN */ + ret = unf_alloc_and_init_big_sfs_pool(v_lport, xchg_mgr); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, + UNF_LOG_REG_ATT, UNF_ERR, + "Port(0x%x) allocate big SFS fail", + v_lport->port_id); + + unf_free_all_rsp_pages(xchg_mgr); + dma_free_coherent(&v_lport->low_level_func.dev->dev, + xchg_mgr->sfs_mem_size, + xchg_mgr->sfs_mm_start, + xchg_mgr->sfs_phy_addr); + xchg_mgr->sfs_mm_start = 0; + goto free_xchg_mem; + } + + spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags); + v_lport->p_xchg_mgr[i] = (void *)xchg_mgr; + list_add_tail(&xchg_mgr->xchg_mgr_entry, + &v_lport->list_xchg_mgr_head); + spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) ExchangeMgr:(0x%p),Base:(0x%x).", + v_lport->port_id, v_lport->p_xchg_mgr[i], + hot_pool->base); + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_INFO, + "Port(0x%x) allocate Exchange Manager size(0x%x).", + v_lport->port_id, xchg_mgr->mem_size); + + return RETURN_OK; + +free_xchg_mem: + vfree(xchg_mem); +free_hot_pool: + vfree(hot_pool); +free_xchg_mgr: + vfree(xchg_mgr); +exit: + unf_free_exchg_mgr_info(v_lport); + return UNF_RETURN_ERROR; +} + +void unf_xchg_mgr_destroy(struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x905, UNF_TRUE, v_lport, return); + + unf_free_all_xchg_mgr(v_lport); +} + +unsigned int unf_alloc_xchg_resource(struct unf_lport_s 
*v_lport) +{ + UNF_CHECK_VALID(0x906, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + INIT_LIST_HEAD(&v_lport->list_dirty_xchg_mgr_head); + INIT_LIST_HEAD(&v_lport->list_xchg_mgr_head); + spin_lock_init(&v_lport->xchg_mgr_lock); + + /* LPort Xchg Management Unit Allocation */ + if (unf_alloc_and_init_xchg_mgr(v_lport) != RETURN_OK) + return UNF_RETURN_ERROR; + + return RETURN_OK; +} + +void unf_destroy_dirty_xchg(struct unf_lport_s *v_lport, int v_show_only) +{ + unsigned int dirty_xchg = 0; + struct unf_xchg_mgr_s *exch_mgr = NULL; + unsigned long flags = 0; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + + UNF_CHECK_VALID(0x908, UNF_TRUE, v_lport, return); + + if (v_lport->dirty_flag & UNF_LPORT_DIRTY_FLAG_XCHGMGR_DIRTY) { + spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags); + list_for_each_safe(node, next_node, + &v_lport->list_dirty_xchg_mgr_head) { + exch_mgr = list_entry(node, struct unf_xchg_mgr_s, + xchg_mgr_entry); + spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags); + if (exch_mgr) { + dirty_xchg = + (exch_mgr->free_pool.total_fcp_xchg + + exch_mgr->free_pool.total_sfs_xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]Port(0x%x) has %u dirty exchange(s)", + v_lport->port_id, dirty_xchg); + + unf_show_all_xchg(v_lport, exch_mgr); + + if (v_show_only == UNF_FALSE) { + /* Delete Dirty Exchange Mgr entry */ + spin_lock_irqsave( + &v_lport->xchg_mgr_lock, + flags); + list_del_init( + &exch_mgr->xchg_mgr_entry); + spin_unlock_irqrestore( + &v_lport->xchg_mgr_lock, + flags); + + /* Free Dirty Exchange Mgr memory */ + unf_free_xchg_mgr_mem(v_lport, + exch_mgr); + } + } + spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags); + } + spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags); + } + + UNF_REFERNCE_VAR(dirty_xchg); +} + +struct unf_xchg_mgr_s *unf_get_xchg_mgr_by_lport(struct unf_lport_s *v_lport, + unsigned int v_idx) +{ + struct unf_xchg_mgr_s *xchg_mgr = NULL; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x909, UNF_TRUE, v_lport, return NULL); + UNF_CHECK_VALID(0x910, UNF_TRUE, v_idx < UNF_EXCHG_MGR_NUM, + return NULL); + + spin_lock_irqsave(&v_lport->xchg_mgr_lock, flags); + xchg_mgr = v_lport->p_xchg_mgr[v_idx]; + spin_unlock_irqrestore(&v_lport->xchg_mgr_lock, flags); + + return xchg_mgr; +} + +struct unf_xchg_hot_pool_s *unf_get_hot_pool_by_lport( + struct unf_lport_s *v_lport, + unsigned int v_mgr_idx) +{ + struct unf_xchg_mgr_s *xchg_mgr = NULL; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x910, UNF_TRUE, (v_lport), return NULL); + + lport = (struct unf_lport_s *)(v_lport->root_lport); + + UNF_CHECK_VALID(0x910, UNF_TRUE, (lport), return NULL); + + /* Get Xchg Manager */ + xchg_mgr = unf_get_xchg_mgr_by_lport(lport, v_mgr_idx); + if (!xchg_mgr) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "Port(0x%x) Exchange Manager is NULL.", + lport->port_id); + + return NULL; + } + + /* Get Xchg Manager Hot Pool */ + return xchg_mgr->hot_pool; +} + +static inline void unf_hot_pool_slab_set( + struct unf_xchg_hot_pool_s *v_hot_pool, + unsigned short v_slab_index, + struct unf_xchg_s *v_xchg) +{ + UNF_CHECK_VALID(0x911, UNF_TRUE, v_hot_pool, return); + + v_hot_pool->xchg_slab[v_slab_index] = v_xchg; +} + +static inline struct unf_xchg_s *unf_get_xchg_by_xchg_tag( + struct unf_xchg_hot_pool_s *v_hot_pool, + unsigned short v_slab_index) +{ + UNF_CHECK_VALID(0x912, UNF_TRUE, v_hot_pool, return NULL); + + return v_hot_pool->xchg_slab[v_slab_index]; +} + +static void *unf_lookup_xchg_by_tag(void 
*v_lport,
+				    unsigned short v_hot_pool_tag)
+{
+	struct unf_lport_s *lport = NULL;
+	struct unf_xchg_hot_pool_s *hot_pool = NULL;
+	struct unf_xchg_s *xchg = NULL;
+	unsigned long flags = 0;
+	unsigned int exchg_mgr_idx = 0;
+	struct unf_xchg_mgr_s *xchg_mgr = NULL;
+
+	UNF_CHECK_VALID(0x913, UNF_TRUE, v_lport, return NULL);
+
+	/* In the case of NPIV, v_lport may be a VPort pointer;
+	 * all VPorts share the ExchMgr of the root L_Port
+	 */
+	lport = ((struct unf_lport_s *)v_lport)->root_lport;
+	UNF_CHECK_VALID(0x914, UNF_TRUE, lport, return NULL);
+
+	exchg_mgr_idx = (v_hot_pool_tag * UNF_EXCHG_MGR_NUM) /
+		lport->low_level_func.support_max_xid_range;
+	if (unlikely(exchg_mgr_idx >= UNF_EXCHG_MGR_NUM)) {
+		UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
+			  "[err]Port(0x%x) Get ExchgMgr %u err",
+			  lport->port_id, exchg_mgr_idx);
+
+		return NULL;
+	}
+
+	xchg_mgr = lport->p_xchg_mgr[exchg_mgr_idx];
+
+	if (unlikely(!xchg_mgr)) {
+		UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
+			  "[err]Port(0x%x) ExchgMgr %u is null",
+			  lport->port_id, exchg_mgr_idx);
+
+		return NULL;
+	}
+
+	hot_pool = xchg_mgr->hot_pool;
+
+	if (unlikely(!hot_pool)) {
+		UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
+			  "Port(0x%x) Hot Pool is NULL.", lport->port_id);
+
+		return NULL;
+	}
+
+	if (unlikely(v_hot_pool_tag >=
+	    (hot_pool->slab_total_sum + hot_pool->base))) {
+		UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
+			  "[err]LPort(0x%x) input Tag(0x%x) exceeds Max(0x%x).",
+			  lport->port_id, v_hot_pool_tag,
+			  (hot_pool->slab_total_sum + hot_pool->base));
+
+		return NULL;
+	}
+
+	spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags);
+	xchg = unf_get_xchg_by_xchg_tag(hot_pool,
+					v_hot_pool_tag - hot_pool->base);
+	spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags);
+
+	return (void *)xchg;
+}
+
+static void *unf_find_xchg_by_oxid(void *v_lport, unsigned short v_oxid,
+				   unsigned int v_oid)
+{
+	struct unf_xchg_hot_pool_s *hot_pool = NULL;
+	struct unf_xchg_s *xchg = NULL;
+	struct list_head *node = NULL;
+	struct list_head *next_node = NULL;
+	struct unf_lport_s *lport = NULL;
+	unsigned long flags = 0;
+	unsigned long xchg_flags = 0;
+	unsigned int i = 0;
+
+	UNF_CHECK_VALID(0x915, UNF_TRUE, (v_lport), return NULL);
+
+	/* In the case of NPIV, v_lport may be a VPort pointer;
+	 * the ExchMgr of the root L_Port is shared
+	 */
+	lport = ((struct unf_lport_s *)v_lport)->root_lport;
+	UNF_CHECK_VALID(0x916, UNF_TRUE, (lport), return NULL);
+
+	for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
+		hot_pool = unf_get_hot_pool_by_lport(lport, i);
+		if (unlikely(!hot_pool)) {
+			UNF_TRACE(UNF_EVTLOG_IO_INFO,
+				  UNF_LOG_IO_ATT, UNF_MAJOR,
+				  "Port(0x%x) MgrIdx %u Hot Pool is NULL.",
+				  lport->port_id, i);
+			continue;
+		}
+
+		spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags);
+
+		/* 1. Traverse sfs_busy list */
+		list_for_each_safe(node, next_node, &hot_pool->sfs_busylist) {
+			xchg = list_entry(node, struct unf_xchg_s,
+					  list_xchg_entry);
+			spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flags);
+			if (UNF_CHECK_OXID_MATCHED(v_oxid, v_oid, xchg)) {
+				atomic_inc(&xchg->ref_cnt);
+				spin_unlock_irqrestore(&xchg->xchg_state_lock,
+						       xchg_flags);
+				spin_unlock_irqrestore(
+					&hot_pool->xchg_hot_pool_lock, flags);
+				return xchg;
+			}
+			spin_unlock_irqrestore(&xchg->xchg_state_lock,
+					       xchg_flags);
+		}
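unf_lookup_xchg_by_tag() above maps a hot pool tag to its owning exchange manager with (tag * UNF_EXCHG_MGR_NUM) / max_xid_range: the tag space is split evenly, manager i owning the contiguous block starting at base = i * slab_num, and the slab index inside a manager is tag - base. A standalone restatement of the mapping, with hypothetical parameter names:

/* manager index for a tag, given nmgr managers sharing
 * [0, max_xid_range); equivalent to tag / (max_xid_range / nmgr)
 * when max_xid_range divides evenly
 */
static unsigned int tag_to_mgr(unsigned int tag, unsigned int nmgr,
			       unsigned int max_xid_range)
{
	return (tag * nmgr) / max_xid_range;
}

/* e.g. nmgr = 4, max_xid_range = 4096: tags 0..1023 -> manager 0,
 * 1024..2047 -> manager 1, and slab index = tag - hot_pool->base
 */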
+
+		/* 2. Traverse INI_Busy list */
+		list_for_each_safe(node, next_node, &hot_pool->ini_busylist) {
+			xchg = list_entry(node, struct unf_xchg_s,
+					  list_xchg_entry);
+			spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flags);
+			if (UNF_CHECK_OXID_MATCHED(v_oxid, v_oid, xchg)) {
+				atomic_inc(&xchg->ref_cnt);
+				spin_unlock_irqrestore(&xchg->xchg_state_lock,
+						       xchg_flags);
+				spin_unlock_irqrestore(
+					&hot_pool->xchg_hot_pool_lock, flags);
+				return xchg;
+			}
+			spin_unlock_irqrestore(&xchg->xchg_state_lock,
+					       xchg_flags);
+		}
+		spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags);
+	}
+
+	return NULL;
+}
+
+static inline int unf_check_xchg_matched(struct unf_xchg_s *xchg,
+					 unsigned long long v_command_sn,
+					 unsigned int v_world_id)
+{
+	int matched = 0;
+
+	matched = (v_command_sn == xchg->cmnd_sn);
+	if (matched && (atomic_read(&xchg->ref_cnt) > 0))
+		return UNF_TRUE;
+	else
+		return UNF_FALSE;
+}
+
+static void *unf_lookup_xchg_by_cmnd_sn(void *v_lport,
+					unsigned long long v_command_sn,
+					unsigned int v_world_id)
+{
+	struct unf_lport_s *lport = NULL;
+	struct unf_xchg_hot_pool_s *hot_pool = NULL;
+	struct list_head *node = NULL;
+	struct list_head *next_node = NULL;
+	struct unf_xchg_s *xchg = NULL;
+	unsigned long flags = 0;
+	unsigned int i;
+
+	UNF_CHECK_VALID(0x919, UNF_TRUE, v_lport, return NULL);
+
+	/* In NPIV, v_lport is a VPort pointer. Idle resources are shared
+	 * through the root L_Port's ExchMgr, but busy resources are
+	 * tracked per VPort, so the VPort itself must be used here.
+	 */
+	lport = (struct unf_lport_s *)v_lport;
+	UNF_CHECK_VALID(0x920, UNF_TRUE, lport, return NULL);
+
+	for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
+		hot_pool = unf_get_hot_pool_by_lport(lport, i);
+		if (unlikely(!hot_pool)) {
+			UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
+				  "[err]Port(0x%x) hot pool is NULL",
+				  lport->port_id);
+
+			continue;
+		}
+
+		/* from busy_list */
+		spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags);
+		list_for_each_safe(node, next_node, &hot_pool->ini_busylist) {
+			xchg = list_entry(node, struct unf_xchg_s,
+					  list_xchg_entry);
+			if (unf_check_xchg_matched(xchg, v_command_sn,
+						   v_world_id)) {
+				spin_unlock_irqrestore(
+					&hot_pool->xchg_hot_pool_lock, flags);
+
+				return xchg;
+			}
+		}
+
+		/* vport: from destroy_list */
+		if (lport != lport->root_lport) {
+			list_for_each_safe(node, next_node,
+					   &hot_pool->list_destroy_xchg) {
+				xchg = list_entry(node, struct unf_xchg_s,
+						  list_xchg_entry);
+				if (unf_check_xchg_matched(xchg, v_command_sn,
+							   v_world_id)) {
+					spin_unlock_irqrestore(
+						&hot_pool->xchg_hot_pool_lock,
+						flags);
+
+					UNF_TRACE(UNF_EVTLOG_IO_INFO,
+						  UNF_LOG_IO_ATT, UNF_MAJOR,
+						  "[info]Port(0x%x) lookup exchange from destroy list",
+						  lport->port_id);
+
+					return xchg;
+				}
+			}
+		}
+
+		spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags);
+	}
+
+	return NULL;
+}
+
+static inline unsigned int unf_alloc_hot_pool_slab(
+				struct unf_xchg_hot_pool_s *v_hot_pool,
+				struct unf_xchg_s *v_xchg,
+				unsigned short v_rx_id)
+{
+	unsigned short slab_index = 0;
+
+	UNF_CHECK_VALID(0x921, UNF_TRUE, v_hot_pool, return UNF_RETURN_ERROR);
+	UNF_CHECK_VALID(0x922, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR);
+
+	/* Check whether the hot pool tag is within the specified SIRT
+	 * range. If yes, set up the management relationship; if not,
+	 * handle it as a normal I/O. If the SIRT bitmap is in use but
+	 * the tag is already occupied, the I/O is discarded.
+ */ + + v_hot_pool->slab_next_index = + (unsigned short)v_hot_pool->slab_next_index; + + slab_index = v_hot_pool->slab_next_index; + while (unf_get_xchg_by_xchg_tag(v_hot_pool, slab_index)) { + slab_index++; + slab_index = slab_index % v_hot_pool->slab_total_sum; + + /* Rewind occurs */ + if (slab_index == v_hot_pool->slab_next_index) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_EQUIP_ATT, UNF_MAJOR, + "There is No Slab At Hot Pool(0x%p) for xchg(0x%p).", + v_hot_pool, v_xchg); + + return UNF_RETURN_ERROR; + } + } + + unf_hot_pool_slab_set(v_hot_pool, slab_index, v_xchg); + v_xchg->hot_pool_tag = slab_index + v_hot_pool->base; + slab_index++; + v_hot_pool->slab_next_index = + slab_index % v_hot_pool->slab_total_sum; + return RETURN_OK; +} + +struct unf_esgl_page_s *unf_get_one_free_esgl_page(struct unf_lport_s *v_lport, + struct unf_xchg_s *v_xchg) +{ + struct unf_lport_s *lport = NULL; + struct unf_esgl_s *esgl = NULL; + struct unf_xchg_s *xchg = NULL; + struct list_head *list_head = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x923, UNF_TRUE, v_lport, return NULL); + UNF_CHECK_VALID(0x924, UNF_TRUE, v_xchg, return NULL); + + lport = v_lport; + xchg = v_xchg; + + /* Obtain a new Esgl from the EsglPool and + * add it to the list_esgls of the Xchg + */ + spin_lock_irqsave(&lport->esgl_pool.esgl_pool_lock, flag); + if (!list_empty(&lport->esgl_pool.list_esgl_pool)) { + list_head = (&lport->esgl_pool.list_esgl_pool)->next; + list_del(list_head); + lport->esgl_pool.esgl_pool_count--; + list_add_tail(list_head, &xchg->list_esgls); + + esgl = list_entry(list_head, struct unf_esgl_s, entry_esgl); + atomic_inc(&xchg->esgl_cnt); + spin_unlock_irqrestore(&lport->esgl_pool.esgl_pool_lock, flag); + } else { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) esgl pool is empty", + lport->nport_id); + + spin_unlock_irqrestore(&lport->esgl_pool.esgl_pool_lock, flag); + return NULL; + } + + return &esgl->page; +} + +void unf_release_esgls(struct unf_xchg_s *v_xchg) +{ + struct unf_lport_s *lport = NULL; + struct list_head *list = NULL; + struct list_head *list_tmp = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x925, UNF_TRUE, v_xchg, return); + UNF_CHECK_VALID(0x926, UNF_TRUE, v_xchg->lport, return); + + if (atomic_read(&v_xchg->esgl_cnt) <= 0) + return; + + /* In the case of NPIV, the Vport pointer is saved in v_pstExch, + * and the EsglPool of RootLport is shared. 
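The slot search in unf_alloc_hot_pool_slab() above is a round-robin allocator: scan forward from slab_next_index, wrap at slab_total_sum, and fail once the scan returns to its starting point. The same logic as a standalone sketch with hypothetical names:

/* returns the allocated slot index, or -1 if every slot is busy */
static int rr_slab_alloc(void **slab, unsigned short total,
			 unsigned short *next_index, void *obj)
{
	unsigned short idx = *next_index;

	while (slab[idx]) {
		idx = (idx + 1) % total;
		if (idx == *next_index)
			return -1;	/* wrapped around: pool full */
	}

	slab[idx] = obj;		/* tag handed out is idx + base */
	*next_index = (idx + 1) % total;
	return idx;
}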
+ */ + lport = (v_xchg->lport)->root_lport; + UNF_CHECK_VALID(0x927, UNF_TRUE, (lport), return); + + spin_lock_irqsave(&lport->esgl_pool.esgl_pool_lock, flag); + if (!list_empty(&v_xchg->list_esgls)) { + list_for_each_safe(list, list_tmp, &v_xchg->list_esgls) { + list_del(list); + list_add_tail(list, &lport->esgl_pool.list_esgl_pool); + lport->esgl_pool.esgl_pool_count++; + atomic_dec(&v_xchg->esgl_cnt); + } + } + spin_unlock_irqrestore(&lport->esgl_pool.esgl_pool_lock, flag); +} + +static void unf_init_xchg_attribute(struct unf_xchg_s *v_xchg) +{ + unsigned long flags = 0; + + UNF_CHECK_VALID(0x973, UNF_TRUE, (v_xchg), return); + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flags); + v_xchg->xchg_mgr = NULL; + v_xchg->free_pool = NULL; + v_xchg->hot_pool = NULL; + v_xchg->lport = NULL; + v_xchg->rport = NULL; + v_xchg->disc_rport = NULL; + v_xchg->io_state = UNF_IO_STATE_NEW; + v_xchg->io_send_stage = TGT_IO_SEND_STAGE_NONE; + v_xchg->io_send_result = TGT_IO_SEND_RESULT_INVALID; + v_xchg->io_send_abort = UNF_FALSE; + v_xchg->io_abort_result = UNF_FALSE; + v_xchg->abts_state = 0; + v_xchg->ox_id = INVALID_VALUE16; + v_xchg->abort_oxid = INVALID_VALUE16; + v_xchg->rx_id = INVALID_VALUE16; + v_xchg->sid = INVALID_VALUE32; + v_xchg->did = INVALID_VALUE32; + v_xchg->oid = INVALID_VALUE32; + v_xchg->disc_port_id = INVALID_VALUE32; + v_xchg->seq_id = INVALID_VALUE8; + v_xchg->cmnd_code = INVALID_VALUE32; + v_xchg->cmnd_sn = INVALID_VALUE64; + v_xchg->data_len = 0; + v_xchg->resid_len = 0; + v_xchg->data_direction = DMA_NONE; + v_xchg->hot_pool_tag = INVALID_VALUE16; + v_xchg->big_sfs_buf = NULL; + v_xchg->may_consume_res_cnt = 0; + v_xchg->fact_consume_res_cnt = 0; + v_xchg->io_front_jif = INVALID_VALUE64; + v_xchg->ob_callback_sts = UNF_IO_SUCCESS; + v_xchg->start_jif = 0; + v_xchg->rport_bind_jifs = INVALID_VALUE64; + v_xchg->scsi_id = INVALID_VALUE32; + v_xchg->world_id = INVALID_VALUE32; + + memset(&v_xchg->seq, 0, sizeof(struct unf_seq_s)); + memset(&v_xchg->fcp_cmnd, 0, sizeof(struct unf_fcp_cmnd_s)); + memset(&v_xchg->scsi_cmnd_info, 0, sizeof(struct unf_scsi_cmd_info_s)); + memset(&v_xchg->abts_rsps, 0, sizeof(struct unf_abts_rsps_s)); + memset(&v_xchg->dif_info, 0, sizeof(struct dif_info_s)); + memset(v_xchg->private, 0, + (PKG_MAX_PRIVATE_DATA_SIZE * sizeof(unsigned int))); + v_xchg->echo_info.echo_result = UNF_ELS_ECHO_RESULT_OK; + v_xchg->echo_info.response_time = 0; + + if (v_xchg->xchg_type == UNF_XCHG_TYPE_INI) { + if (v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu) + memset(v_xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu, + 0, sizeof(struct unf_fcprsp_iu_s)); + } else if (v_xchg->xchg_type == UNF_XCHG_TYPE_SFS) { + if (v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) { + memset(v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr, + 0, sizeof(union unf_sfs_u)); + v_xchg->fcp_sfs_union.sfs_entry.cur_offset = 0; + } + } else { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "Exchange Type(0x%x) SFS Union uninited.", + v_xchg->xchg_type); + } + v_xchg->xchg_type = UNF_XCHG_TYPE_INVALID; + v_xchg->pfn_ob_callback = NULL; + v_xchg->pfn_callback = NULL; + v_xchg->pfn_free_xchg = NULL; + + atomic_set(&v_xchg->ref_cnt, 0); + atomic_set(&v_xchg->esgl_cnt, 0); + atomic_set(&v_xchg->delay_flag, 0); + + if (delayed_work_pending(&v_xchg->timeout_work)) + UNF_DEL_XCHG_TIMER_SAFE(v_xchg); + + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags); +} + +static void unf_add_back_to_fcp_list( + struct unf_xchg_free_pool_s *v_free_pool, + struct unf_xchg_s *v_xchg) +{ + unsigned long 
flags = 0; + + UNF_CHECK_VALID(0x928, UNF_TRUE, v_free_pool, return); + UNF_CHECK_VALID(0x929, UNF_TRUE, v_xchg, return); + + unf_init_xchg_attribute(v_xchg); + + /* The released I/O resources are added to + * the queue tail to facilitate fault locating + */ + spin_lock_irqsave(&v_free_pool->xchg_free_pool_lock, flags); + list_add_tail(&v_xchg->list_xchg_entry, + &v_free_pool->list_free_xchg_list); + v_free_pool->total_fcp_xchg++; + spin_unlock_irqrestore(&v_free_pool->xchg_free_pool_lock, flags); +} + +static void unf_check_xchg_mgr_status(struct unf_xchg_mgr_s *v_xchg_mgr) +{ + unsigned long flags = 0; + unsigned int total_xchg = 0; + unsigned int total_xchg_sum = 0; + + UNF_CHECK_VALID(0x930, UNF_TRUE, v_xchg_mgr, return); + + spin_lock_irqsave(&v_xchg_mgr->free_pool.xchg_free_pool_lock, flags); + + total_xchg = v_xchg_mgr->free_pool.total_fcp_xchg + + v_xchg_mgr->free_pool.total_sfs_xchg; + total_xchg_sum = v_xchg_mgr->free_pool.fcp_xchg_sum + + v_xchg_mgr->free_pool.sfs_xchg_sum; + + if ((v_xchg_mgr->free_pool.xchg_mgr_completion) && + (total_xchg == total_xchg_sum)) { + complete(v_xchg_mgr->free_pool.xchg_mgr_completion); + } + spin_unlock_irqrestore(&v_xchg_mgr->free_pool.xchg_free_pool_lock, + flags); +} + +static void unf_free_fcp_xchg(struct unf_xchg_s *v_xchg) +{ + struct unf_xchg_free_pool_s *free_pool = NULL; + struct unf_xchg_mgr_s *xchg_mgr = NULL; + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + + UNF_CHECK_VALID(0x932, UNF_TRUE, v_xchg, return); + + /* Releasing a Specified INI I/O and Invoking the scsi_done Process */ + unf_done_ini_xchg(v_xchg); + free_pool = v_xchg->free_pool; + xchg_mgr = v_xchg->xchg_mgr; + lport = v_xchg->lport; + rport = v_xchg->rport; + + atomic_dec(&rport->pending_io_cnt); + /* Release the Esgls in the Xchg structure and + * return it to the EsglPool of the Lport + */ + unf_release_esgls(v_xchg); + + /* Mount I/O resources to the FCP Free linked list */ + unf_add_back_to_fcp_list(free_pool, v_xchg); + + /* The Xchg is released synchronously and then forcibly released to + * prevent the Xchg from accessing the Xchg in the normal I/O process + */ + if (unlikely(lport->b_port_removing == UNF_TRUE)) + unf_check_xchg_mgr_status(xchg_mgr); +} + +static void unf_fc_abort_timeout_cmnd(struct unf_lport_s *v_lport, + struct unf_xchg_s *v_xchg) +{ + struct unf_lport_s *lport = v_lport; + struct unf_xchg_s *xchg = v_xchg; + struct unf_scsi_cmd_s scsi_cmnd = { 0 }; + unsigned long flag = 0; + unsigned int timeout_value = 2000; + unsigned int return_value = 0; + struct unf_rport_scsi_id_image_s *scsi_image_table = NULL; + + UNF_CHECK_VALID(0x936, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x937, UNF_TRUE, v_xchg, return); + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + if (v_xchg->io_state & INI_IO_STATE_UPABORT) { + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "LPort(0x%x) xchange(0x%p) OX_ID(0x%x), RX_ID(0x%x) Cmdsn(0x%lx) has been aborted.", + lport->port_id, v_xchg, v_xchg->ox_id, + v_xchg->rx_id, (unsigned long)v_xchg->cmnd_sn); + return; + } + v_xchg->io_state |= INI_IO_STATE_UPABORT; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_NORMAL, UNF_KEVENT, + "LPort(0x%x) exchg(0x%p) OX_ID(0x%x) RX_ID(0x%x) Cmdsn(0x%lx) timeout abort it", + lport->port_id, v_xchg, v_xchg->ox_id, + v_xchg->rx_id, (unsigned long)v_xchg->cmnd_sn); + + lport->xchg_mgr_temp.pfn_unf_xchg_add_timer( + (void *)v_xchg, + 
(unsigned long)UNF_WAIT_ABTS_RSP_TIMEOUT,
+			UNF_TIMER_TYPE_INI_ABTS);
+
+	sema_init(&v_xchg->task_sema, 0);
+
+	scsi_cmnd.scsi_id = xchg->scsi_cmnd_info.scsi_id;
+	scsi_cmnd.upper_cmnd = xchg->scsi_cmnd_info.scsi_cmnd;
+	scsi_cmnd.pfn_done = xchg->scsi_cmnd_info.pfn_done;
+	scsi_image_table = &lport->rport_scsi_table;
+
+	if (unf_send_abts(lport, v_xchg) != RETURN_OK) {
+		UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
+			  "LPort(0x%x) failed to send ABTS. Exchange OX_ID(0x%x), RX_ID(0x%x).",
+			  lport->port_id, v_xchg->ox_id,
+			  v_xchg->rx_id);
+		lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)v_xchg);
+
+		spin_lock_irqsave(&v_xchg->xchg_state_lock, flag);
+		v_xchg->io_state &= ~INI_IO_STATE_UPABORT;
+		spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag);
+		/* The message fails to be sent.
+		 * It is released internally and does not
+		 * need to be released externally.
+		 */
+		return;
+	}
+
+	if (down_timeout(&v_xchg->task_sema,
+			 (long long)msecs_to_jiffies(timeout_value))) {
+		UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
+			  "[warn]Port(0x%x) recv abts marker timeout, Exch(0x%p) OX_ID(0x%x) RX_ID(0x%x)",
+			  lport->port_id, v_xchg,
+			  v_xchg->ox_id, v_xchg->rx_id);
+		lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)v_xchg);
+
+		/* Cancel the INI_IO_STATE_UPABORT flag
+		 * and let TMF process the I/O
+		 */
+		spin_lock_irqsave(&v_xchg->xchg_state_lock, flag);
+		v_xchg->io_state &= ~INI_IO_STATE_UPABORT;
+		spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag);
+
+		return;
+	}
+
+	spin_lock_irqsave(&v_xchg->xchg_state_lock, flag);
+	if ((v_xchg->ucode_abts_state == UNF_IO_SUCCESS) ||
+	    (v_xchg->scsi_cmnd_info.result == UNF_IO_ABORT_PORT_REMOVING)) {
+		spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag);
+
+		UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_MAJOR,
+			  "[info]Port(0x%x) Send ABTS succeeded and recv marker Exch(0x%p) OX_ID(0x%x) RX_ID(0x%x) marker status(0x%x)",
+			  lport->port_id, v_xchg,
+			  v_xchg->ox_id, v_xchg->rx_id,
+			  v_xchg->ucode_abts_state);
+		return_value = DID_BUS_BUSY;
+		UNF_IO_RESULT_CNT(scsi_image_table, scsi_cmnd.scsi_id,
+				  return_value);
+		unf_complete_cmnd(&scsi_cmnd, DID_BUS_BUSY << 16);
+		return;
+	}
+	spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag);
+	lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)v_xchg);
+	spin_lock_irqsave(&v_xchg->xchg_state_lock, flag);
+	v_xchg->io_state &= ~INI_IO_STATE_UPABORT;
+	spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag);
+	UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN,
+		  "[warn]Port(0x%x) send ABTS failed. 
Exch(0x%p) hot_tag(0x%x) ret(0x%x) io_state(0x%x)",
+		  lport->port_id, v_xchg, v_xchg->hot_pool_tag,
+		  v_xchg->scsi_cmnd_info.result, v_xchg->io_state);
+}
+
+static void unf_fc_ini_send_abts_timeout(struct unf_lport_s *lport,
+					 struct unf_rport_s *rport,
+					 struct unf_xchg_s *xchg)
+{
+	if (xchg->rport_bind_jifs == rport->rport_alloc_jifs &&
+	    xchg->rport_bind_jifs != INVALID_VALUE64) {
+		UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
+			  "[warn]Port(0x%x) RPort(0x%x) Exch(0x%p) first time to send abts timeout, retry again OX_ID(0x%x) RX_ID(0x%x) state(0x%x)",
+			  lport->port_id, rport->nport_id,
+			  xchg, xchg->ox_id, xchg->rx_id, xchg->io_state);
+
+		lport->xchg_mgr_temp.pfn_unf_xchg_add_timer(
+				(void *)xchg,
+				(unsigned long)UNF_WAIT_ABTS_RSP_TIMEOUT,
+				UNF_TIMER_TYPE_INI_ABTS);
+
+		if (unf_send_abts(lport, xchg) != RETURN_OK) {
+			lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer(
+					(void *)xchg);
+
+			unf_abts_timeout_recovery_default(rport, xchg);
+
+			unf_cm_free_xchg(lport, xchg);
+		}
+	} else {
+		UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
+			  "[warn]Port(0x%x) RPort(0x%x) Exch(0x%p) rport is invalid, exchg rport jiff(0x%llx 0x%llx), free exchange OX_ID(0x%x) RX_ID(0x%x) state(0x%x)",
+			  lport->port_id, rport->nport_id, xchg,
+			  xchg->rport_bind_jifs, rport->rport_alloc_jifs,
+			  xchg->ox_id, xchg->rx_id, xchg->io_state);
+
+		unf_cm_free_xchg(lport, xchg);
+	}
+}
+
+static void unf_fc_ini_io_rec_wait_timeout(struct unf_lport_s *lport,
+					   struct unf_rport_s *rport,
+					   struct unf_xchg_s *xchg)
+{
+	unsigned long io_time_out = 0;
+
+	if (xchg->rport_bind_jifs == rport->rport_alloc_jifs) {
+		unf_send_rec(lport, rport, xchg);
+		if (xchg->scsi_cmnd_info.abort_timeout > 0) {
+			io_time_out =
+				(xchg->scsi_cmnd_info.abort_timeout >
+				 UNF_REC_TOV) ?
+				(xchg->scsi_cmnd_info.abort_timeout -
+				 UNF_REC_TOV) : 0;
+
+			if (io_time_out > 0) {
+				lport->xchg_mgr_temp.pfn_unf_xchg_add_timer(
+						(void *)xchg,
+						io_time_out,
+						UNF_TIMER_TYPE_REQ_IO);
+			} else {
+				unf_fc_abort_timeout_cmnd(lport, xchg);
+			}
+		}
+	} else {
+		UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
+			  "[warn]Port(0x%x) RPort(0x%x) Exch(0x%p) Rec timeout exchange OX_ID(0x%x) RX_ID(0x%x) state(0x%x), bind jifs(0x%llx) not equal Rport alloc jifs(0x%llx)",
+			  lport->port_id, rport->nport_id,
+			  xchg, xchg->ox_id, xchg->rx_id,
+			  xchg->io_state, xchg->rport_bind_jifs,
+			  rport->rport_alloc_jifs);
+	}
+}
+
+static void unf_fc_ini_io_xchg_timeout(struct work_struct *v_work)
+{
+	struct unf_xchg_s *xchg = NULL;
+	struct unf_lport_s *lport = NULL;
+	struct unf_rport_s *rport = NULL;
+	unsigned long flags = 0;
+	unsigned int ret = UNF_RETURN_ERROR;
+	unsigned int port_valid_flag = 0;
+
+	UNF_REFERNCE_VAR(ret);
+
+	xchg = container_of(v_work, struct unf_xchg_s, timeout_work.work);
+	UNF_CHECK_VALID(0x939, UNF_TRUE, xchg, return);
+
+	ret = unf_xchg_ref_inc(xchg, INI_IO_TIMEOUT);
+	UNF_CHECK_VALID(0x940, UNF_TRUE, ret == RETURN_OK, return);
+
+	lport = xchg->lport;
+	rport = xchg->rport;
+
+	port_valid_flag = !lport || !rport;
+	if (port_valid_flag) {
+		unf_xchg_ref_dec(xchg, INI_IO_TIMEOUT);
+		unf_xchg_ref_dec(xchg, INI_IO_TIMEOUT);
+
+		return;
+	}
+
+	spin_lock_irqsave(&xchg->xchg_state_lock, flags);
+
+	/* 1. Timer timeout after RRQ send failed */
+	if (INI_IO_STATE_RRQSEND_ERR & xchg->io_state) {
+		spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
+
+		UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
+			  "[warn]LPort(0x%x) RPort(0x%x) Exch(0x%p) has waited long enough after RRQ send failed OX_ID(0x%x) RX_ID(0x%x) state(0x%x)",
+			  lport->port_id, rport->nport_id,
+			  xchg, xchg->ox_id, xchg->rx_id, xchg->io_state);
+
+		unf_cm_free_xchg(lport, xchg);
+	}
+	/* 2. Second ABTS timeout: enter LOGO process */
+	else if ((INI_IO_STATE_ABORT_TIMEOUT & xchg->io_state) &&
+		 (!(ABTS_RESPONSE_RECEIVED & xchg->abts_state))) {
+		spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
+
+		UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
+			  "[warn]Port(0x%x) RPort(0x%x) Exch(0x%p) has waited long enough after second abts send OX_ID(0x%x) RX_ID(0x%x) state(0x%x)",
+			  lport->port_id, rport->nport_id,
+			  xchg, xchg->ox_id, xchg->rx_id,
+			  xchg->io_state);
+
+		unf_abts_timeout_recovery_default(rport, xchg);
+
+		unf_cm_free_xchg(lport, xchg);
+	}
+	/* First time to send ABTS, timeout and retry to send ABTS again */
+	else if ((xchg->io_state & INI_IO_STATE_UPABORT) &&
+		 (!(xchg->abts_state & ABTS_RESPONSE_RECEIVED))) {
+		xchg->io_state |= INI_IO_STATE_ABORT_TIMEOUT;
+		spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
+		unf_fc_ini_send_abts_timeout(lport, rport, xchg);
+	}
+	/* 3. IO_DONE */
+	else if ((xchg->io_state & INI_IO_STATE_DONE) &&
+		 (xchg->abts_state & ABTS_RESPONSE_RECEIVED)) {
+		/*
+		 * for IO_DONE:
+		 * 1. INI ABTS first timer time out
+		 * 2. INI RCVD ABTS Response
+		 * 3. Normal case for I/O Done
+		 */
+		/* Send ABTS & RCVD RSP & no timeout */
+		spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
+
+		/* Send RRQ */
+		if (unf_send_rrq(lport, rport, xchg) == RETURN_OK) {
+			UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT,
+				  UNF_MAJOR,
+				  "[info]LPort(0x%x) sent RRQ successfully to RPort(0x%x) Exch(0x%p) OX_ID(0x%x) RX_ID(0x%x) state(0x%x)",
+				  lport->port_id, rport->nport_id, xchg,
+				  xchg->ox_id, xchg->rx_id, xchg->io_state);
+		} else {
+			UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT,
+				  UNF_WARN,
+				  "[warn]LPort(0x%x) can't send RRQ to RPort(0x%x) Exch(0x%p) OX_ID(0x%x) RX_ID(0x%x) state(0x%x)",
+				  lport->port_id, rport->nport_id, xchg,
+				  xchg->ox_id, xchg->rx_id, xchg->io_state);
+
+			spin_lock_irqsave(&xchg->xchg_state_lock, flags);
+			xchg->io_state |= INI_IO_STATE_RRQSEND_ERR;
+			spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
+
+			lport->xchg_mgr_temp.pfn_unf_xchg_add_timer(
+				(void *)xchg,
+				(unsigned long)UNF_WRITE_RRQ_SENDERR_INTERVAL,
+				UNF_TIMER_TYPE_INI_IO);
+		}
+	} else if (xchg->io_state & INI_IO_STATE_REC_TIMEOUT_WAIT) {
+		xchg->io_state &= ~INI_IO_STATE_REC_TIMEOUT_WAIT;
+		spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
+		unf_fc_ini_io_rec_wait_timeout(lport, rport, xchg);
+	} else {
+		/* 4. I/O Timer Timeout */
+		/* vmware */
+		spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
+
+		unf_fc_abort_timeout_cmnd(lport, xchg);
+	}
+
+	unf_xchg_ref_dec(xchg, INI_IO_TIMEOUT);
+	unf_xchg_ref_dec(xchg, INI_IO_TIMEOUT);
+
+	UNF_REFERNCE_VAR(ret);
+}
+
+static inline struct unf_xchg_s *unf_alloc_io_xchg(
+				struct unf_lport_s *v_lport,
+				struct unf_xchg_mgr_s *v_xchg_mgr,
+				unsigned int v_xchg_type,
+				unsigned short v_rx_id)
+{
+	struct unf_xchg_s *xchg = NULL;
+	struct list_head *list_node = NULL;
+	struct unf_xchg_free_pool_s *free_pool = NULL;
+	struct unf_xchg_hot_pool_s *hot_pool = NULL;
+	unsigned long flags = 0;
+	static atomic64_t s_exhg_id;
+
+	void (*unf_fc_io_xchg_timeout)(struct work_struct *v_work) = NULL;
+
+	UNF_CHECK_VALID(0x941, UNF_TRUE, v_xchg_mgr, return NULL);
+	UNF_CHECK_VALID(0x942, UNF_TRUE, v_lport, return NULL);
+
+	free_pool = &v_xchg_mgr->free_pool;
+	hot_pool = v_xchg_mgr->hot_pool;
+	UNF_CHECK_VALID(0x943, UNF_TRUE, free_pool, return NULL);
+	UNF_CHECK_VALID(0x944, UNF_TRUE, hot_pool, return NULL);
+
+	/* 1. Free Pool */
+	spin_lock_irqsave(&free_pool->xchg_free_pool_lock, flags);
+	if (unlikely(list_empty(&free_pool->list_free_xchg_list))) {
+		UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_INFO,
+			  "Port(0x%x) has run out of exchanges.",
+			  v_lport->port_id);
+
+		spin_unlock_irqrestore(&free_pool->xchg_free_pool_lock, flags);
+
+		return NULL;
+	}
+
+	/* Select an idle node from free pool */
+	list_node = (&free_pool->list_free_xchg_list)->next;
+	list_del(list_node);
+	free_pool->total_fcp_xchg--;
+	spin_unlock_irqrestore(&free_pool->xchg_free_pool_lock, flags);
+
+	xchg = list_entry(list_node, struct unf_xchg_s, list_xchg_entry);
+
+	/*
+	 * 2. Hot Pool:
+	 * Allocate a hot pool tag for the exchange and keep it on the
+	 * INI busy list until it is released.
+	 */
+	flags = 0;
+	spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags);
+	if (unf_alloc_hot_pool_slab(hot_pool, xchg, v_rx_id) != RETURN_OK) {
+		spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags);
+
+		unf_add_back_to_fcp_list(free_pool, xchg);
+		if (unlikely(v_lport->b_port_removing == UNF_TRUE))
+			unf_check_xchg_mgr_status(v_xchg_mgr);
+
+		return NULL;
+	}
+
+	list_add_tail(&xchg->list_xchg_entry, &hot_pool->ini_busylist);
+	unf_fc_io_xchg_timeout = unf_fc_ini_io_xchg_timeout;
+
+	spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags);
+
+	/* 3. 
Exchange State */ + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + xchg->start_jif = atomic64_inc_return(&s_exhg_id); + xchg->xchg_mgr = v_xchg_mgr; + xchg->free_pool = free_pool; + xchg->hot_pool = hot_pool; + xchg->lport = v_lport; + xchg->xchg_type = v_xchg_type; + xchg->pfn_free_xchg = unf_free_fcp_xchg; + xchg->io_state = UNF_IO_STATE_NEW; + xchg->io_send_stage = TGT_IO_SEND_STAGE_NONE; + xchg->io_send_result = TGT_IO_SEND_RESULT_INVALID; + xchg->io_send_abort = UNF_FALSE; + xchg->io_abort_result = UNF_FALSE; + xchg->ox_id = INVALID_VALUE16; + xchg->abort_oxid = INVALID_VALUE16; + xchg->rx_id = INVALID_VALUE16; + xchg->sid = INVALID_VALUE32; + xchg->did = INVALID_VALUE32; + xchg->oid = INVALID_VALUE32; + xchg->seq_id = INVALID_VALUE8; + xchg->cmnd_code = INVALID_VALUE32; + xchg->data_len = 0; + xchg->resid_len = 0; + xchg->data_direction = DMA_NONE; + xchg->may_consume_res_cnt = 0; + xchg->fact_consume_res_cnt = 0; + xchg->io_front_jif = 0; + xchg->tmf_state = 0; + xchg->ucode_abts_state = INVALID_VALUE32; + xchg->abts_state = 0; + xchg->rport_bind_jifs = INVALID_VALUE64; + xchg->scsi_id = INVALID_VALUE32; + xchg->world_id = INVALID_VALUE32; + + memset(&xchg->dif_control, 0, sizeof(struct unf_dif_control_info_s)); + memset(&xchg->req_sgl_info, 0, sizeof(struct unf_req_sgl_info_s)); + memset(&xchg->dif_sgl_info, 0, sizeof(struct unf_req_sgl_info_s)); + memset(&xchg->abts_rsps, 0, sizeof(struct unf_abts_rsps_s)); + xchg->scsi_cmnd_info.result = 0; + + xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] = + (unsigned int)atomic64_inc_return(&v_lport->exchg_index); + if (xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] == 0) + xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] = + (unsigned int)atomic64_inc_return(&v_lport->exchg_index); + + atomic_set(&xchg->ref_cnt, 0); + atomic_set(&xchg->delay_flag, 0); + + if (delayed_work_pending(&xchg->timeout_work)) + UNF_DEL_XCHG_TIMER_SAFE(xchg); + + INIT_DELAYED_WORK(&xchg->timeout_work, unf_fc_io_xchg_timeout); + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + return xchg; +} + +static void unf_add_back_to_sfs_list( + struct unf_xchg_free_pool_s *v_free_pool, + struct unf_xchg_s *v_xchg) +{ + unsigned long flags = 0; + + UNF_CHECK_VALID(0x945, UNF_TRUE, v_free_pool, return); + UNF_CHECK_VALID(0x946, UNF_TRUE, v_xchg, return); + + unf_init_xchg_attribute(v_xchg); + + spin_lock_irqsave(&v_free_pool->xchg_free_pool_lock, flags); + + list_add_tail(&v_xchg->list_xchg_entry, + &v_free_pool->list_sfs_xchg_list); + v_free_pool->total_sfs_xchg++; + spin_unlock_irqrestore(&v_free_pool->xchg_free_pool_lock, flags); +} + +static void unf_free_sfs_xchg(struct unf_xchg_s *v_xchg) +{ + struct unf_xchg_free_pool_s *free_pool = NULL; + struct unf_xchg_mgr_s *xchg_mgr = NULL; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x947, UNF_TRUE, v_xchg, return); + + free_pool = v_xchg->free_pool; + lport = v_xchg->lport; + xchg_mgr = v_xchg->xchg_mgr; + + /* The memory is applied for when the GID_PT/GID_FT is sent. + * If no response is received, the GID_PT/GID_FT + * needs to be forcibly released. 
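+	 * unf_free_one_big_sfs() below returns any big SFS buffer still
+	 * held by this exchange to the big SFS pool first.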
+ */ + + unf_free_one_big_sfs(v_xchg); + + unf_add_back_to_sfs_list(free_pool, v_xchg); + + if (unlikely(lport->b_port_removing == UNF_TRUE)) + unf_check_xchg_mgr_status(xchg_mgr); +} + +static void unf_fc_xchg_add_timer(void *v_xchg, + unsigned long v_time_ms, + enum unf_timer_type_e v_en_time_type) +{ + unsigned long flag = 0; + struct unf_xchg_s *xchg = NULL; + unsigned long time_ms = v_time_ms; + struct unf_lport_s *lport; + + UNF_CHECK_VALID(0x948, UNF_TRUE, v_xchg, return); + xchg = (struct unf_xchg_s *)v_xchg; + lport = xchg->lport; + UNF_CHECK_VALID(0x948, UNF_TRUE, lport, return); + + /* update timeout */ + switch (v_en_time_type) { + case UNF_TIMER_TYPE_INI_RRQ: + time_ms = time_ms - UNF_INI_RRQ_REDUNDANT_TIME; + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, + UNF_INFO, "INI RRQ Timer set."); + break; + + case UNF_TIMER_TYPE_SFS: + time_ms = time_ms + UNF_INI_ELS_REDUNDANT_TIME; + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, + UNF_INFO, "INI ELS Timer set."); + break; + default: + break; + } + + /* The xchg of the timer must be valid. + * If the reference count of xchg is 0, + * the timer must not be added + */ + if (atomic_read(&xchg->ref_cnt) <= 0) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_KEVENT, + "[warn]Abnormal Exchange(0x%p), Reference count(0x%x), Can't add timer.", + xchg, atomic_read(&xchg->ref_cnt)); + return; + } + + /* Delay Work: Hold for timer */ + spin_lock_irqsave(&xchg->xchg_state_lock, flag); + if (queue_delayed_work(lport->xchg_wq, + &xchg->timeout_work, + (unsigned long) + msecs_to_jiffies((unsigned int)time_ms))) { + /* hold for timer */ + atomic_inc(&xchg->ref_cnt); + } + spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); +} + +static void unf_sfs_xchg_timeout(struct work_struct *v_work) +{ + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + unsigned long flags = 0; + + UNF_CHECK_VALID(0x949, UNF_TRUE, v_work, return); + xchg = container_of(v_work, struct unf_xchg_s, timeout_work.work); + UNF_CHECK_VALID(0x950, UNF_TRUE, xchg, return); + + ret = unf_xchg_ref_inc(xchg, SFS_TIMEOUT); + UNF_REFERNCE_VAR(ret); + UNF_CHECK_VALID(0x951, UNF_TRUE, ret == RETURN_OK, return); + + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + lport = xchg->lport; + rport = xchg->rport; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + unf_xchg_ref_dec(xchg, SFS_TIMEOUT); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]SFS Exch(%p) Cmnd(0x%x) IO Exch(0x%p) Sid_Did(0x%x:0x%x) HotTag(0x%x) State(0x%x) Timeout.", + xchg, xchg->cmnd_code, xchg->io_xchg, xchg->sid, + xchg->did, xchg->hot_pool_tag, xchg->io_state); + + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + if ((xchg->io_state & TGT_IO_STATE_ABORT) && + (xchg->cmnd_code != ELS_RRQ) && + (xchg->cmnd_code != ELS_LOGO)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "SFS Exch(0x%p) Cmnd(0x%x) Hot Pool Tag(0x%x) timeout, but aborted, no need to handle.", + xchg, xchg->cmnd_code, xchg->hot_pool_tag); + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + unf_xchg_ref_dec(xchg, SFS_TIMEOUT); + unf_xchg_ref_dec(xchg, SFS_TIMEOUT); + + return; + } + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + /* The sfs times out. If the sfs is ELS reply, + * go to unf_rport_error_recovery/unf_lport_error_recovery. + * Otherwise, go to the corresponding obCallback. 
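+	 * (For an ELS reply, a peer N_Port ID in the domain manager
+	 * range triggers L_Port recovery; any other peer triggers
+	 * R_Port recovery.)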
+	 */
+	if (UNF_XCHG_IS_ELS_REPLY(xchg) && (rport)) {
+		if (rport->nport_id >= UNF_FC_FID_DOM_MGR)
+			unf_lport_error_recovery(lport);
+		else
+			unf_rport_error_recovery(rport);
+	} else if (xchg->pfn_ob_callback) {
+		xchg->pfn_ob_callback(xchg);
+	} else {
+		/* Do nothing */
+	}
+
+	unf_xchg_ref_dec(xchg, SFS_TIMEOUT);
+	unf_xchg_ref_dec(xchg, SFS_TIMEOUT);
+}
+
+static struct unf_xchg_s *unf_alloc_sfs_xchg(struct unf_lport_s *v_lport,
+					     struct unf_xchg_mgr_s *v_xchg_mgr,
+					     unsigned int v_xchg_type,
+					     unsigned short v_rx_id)
+{
+	struct unf_xchg_s *xchg = NULL;
+	struct list_head *list_node = NULL;
+	struct unf_xchg_free_pool_s *free_pool = NULL;
+	struct unf_xchg_hot_pool_s *hot_pool = NULL;
+	unsigned long flags = 0;
+
+	UNF_CHECK_VALID(0x952, UNF_TRUE, v_lport, return NULL);
+	UNF_CHECK_VALID(0x953, UNF_TRUE, v_xchg_mgr, return NULL);
+	free_pool = &v_xchg_mgr->free_pool;
+	hot_pool = v_xchg_mgr->hot_pool;
+	UNF_CHECK_VALID(0x954, UNF_TRUE, free_pool, return NULL);
+	UNF_CHECK_VALID(0x955, UNF_TRUE, hot_pool, return NULL);
+
+	/* Select an idle node from free pool */
+	spin_lock_irqsave(&free_pool->xchg_free_pool_lock, flags);
+	if (list_empty(&free_pool->list_sfs_xchg_list)) {
+		UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
+			  "Port(0x%x) has run out of exchanges.",
+			  v_lport->port_id);
+
+		spin_unlock_irqrestore(&free_pool->xchg_free_pool_lock, flags);
+
+		return NULL;
+	}
+
+	list_node = (&free_pool->list_sfs_xchg_list)->next;
+	list_del(list_node);
+	free_pool->total_sfs_xchg--;
+	spin_unlock_irqrestore(&free_pool->xchg_free_pool_lock, flags);
+
+	xchg = list_entry(list_node, struct unf_xchg_s, list_xchg_entry);
+
+	/*
+	 * Mount the exchange to the hot pool: allocate a hot pool tag
+	 * and keep the exchange on the SFS busy list until release.
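+	 * On slab allocation failure the exchange is returned to the
+	 * SFS free list right away.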
+	 */
+	flags = 0;
+	spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags);
+	if (unf_alloc_hot_pool_slab(hot_pool, xchg, v_rx_id) != RETURN_OK) {
+		spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags);
+
+		unf_add_back_to_sfs_list(free_pool, xchg);
+		if (unlikely(v_lport->b_port_removing == UNF_TRUE))
+			unf_check_xchg_mgr_status(v_xchg_mgr);
+
+		return NULL;
+	}
+
+	list_add_tail(&xchg->list_xchg_entry, &hot_pool->sfs_busylist);
+	hot_pool->total_xchges++;
+	spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags);
+
+	spin_lock_irqsave(&xchg->xchg_state_lock, flags);
+	xchg->free_pool = free_pool;
+	xchg->hot_pool = hot_pool;
+	xchg->lport = v_lport;
+	xchg->xchg_mgr = v_xchg_mgr;
+	xchg->pfn_free_xchg = unf_free_sfs_xchg;
+	xchg->xchg_type = v_xchg_type;
+	xchg->io_state = UNF_IO_STATE_NEW;
+	xchg->scsi_cmnd_info.result = 0;
+	xchg->ob_callback_sts = UNF_IO_SUCCESS;
+
+	xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] =
+		(unsigned int)atomic64_inc_return(&v_lport->exchg_index);
+	if (xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] == 0)
+		xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] =
+			(unsigned int)
+			atomic64_inc_return(&v_lport->exchg_index);
+
+	if (delayed_work_pending(&xchg->timeout_work))
+		UNF_DEL_XCHG_TIMER_SAFE(xchg);
+
+	INIT_DELAYED_WORK(&xchg->timeout_work, unf_sfs_xchg_timeout);
+	spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
+
+	return xchg;
+}
+
+static void *unf_get_new_xchg(void *v_lport, unsigned int v_xchg_type,
+			      unsigned short v_rx_id)
+{
+	struct unf_lport_s *lport = NULL;
+	struct unf_xchg_mgr_s *xchg_mgr = NULL;
+	struct unf_xchg_s *xchg = NULL;
+	unsigned int xchg_type = 0;
+	unsigned short xchg_mgr_type;
+	unsigned int rtry_cnt = 0;
+	unsigned int last_exchg_mgr_idx;
+
+	xchg_mgr_type = (v_xchg_type >> 16);
+	xchg_type = v_xchg_type & 0xFFFF;
+	UNF_CHECK_VALID(0x956, UNF_TRUE, v_lport, return NULL);
+
+	/* In the case of NPIV, v_lport is the VPort pointer,
+	 * and it shares the ExchMgr of the root L_Port.
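+	 * A port in NOP (no-operation) state must not allocate
+	 * new exchanges.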
+	 */
+	lport = ((struct unf_lport_s *)v_lport)->root_lport;
+	UNF_CHECK_VALID(0x957, UNF_TRUE, (lport), return NULL);
+
+	if (unlikely((atomic_read(&lport->port_no_operater_flag) ==
+		      UNF_LPORT_NOP) ||
+		     (atomic_read(&((struct unf_lport_s *)v_lport)->port_no_operater_flag) ==
+		      UNF_LPORT_NOP)))
+		return NULL;
+
+	last_exchg_mgr_idx =
+		(unsigned int)atomic64_inc_return(&lport->last_exchg_mgr_idx);
+try_next_mgr:
+	rtry_cnt++;
+	if (unlikely(rtry_cnt > UNF_EXCHG_MGR_NUM))
+		return NULL;
+
+	/* In fixed mode, only XchgMgr 0 is used */
+	if (unlikely(xchg_mgr_type == UNF_XCHG_MGR_TYPE_FIXED))
+		xchg_mgr = (struct unf_xchg_mgr_s *)lport->p_xchg_mgr[0];
+	else
+		xchg_mgr =
+		(struct unf_xchg_mgr_s *)
+		lport->p_xchg_mgr[last_exchg_mgr_idx % UNF_EXCHG_MGR_NUM];
+
+	if (unlikely(!xchg_mgr)) {
+		UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
+			  "[err]Port(0x%x) exchange manager %u is null.",
+			  lport->port_id,
+			  last_exchg_mgr_idx % UNF_EXCHG_MGR_NUM);
+		return NULL;
+	}
+
+	last_exchg_mgr_idx++;
+
+	/* Allocate entries based on the Exchange type */
+	switch (xchg_type) {
+	case UNF_XCHG_TYPE_SFS:
+		xchg = unf_alloc_sfs_xchg(v_lport, xchg_mgr, xchg_type,
+					  INVALID_VALUE16);
+		break;
+
+	case UNF_XCHG_TYPE_INI:
+		xchg = unf_alloc_io_xchg(v_lport, xchg_mgr, xchg_type,
+					 INVALID_VALUE16);
+		break;
+
+	default:
+		UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
+			  "Port(0x%x) unknown Exchange type(0x%x).",
+			  lport->port_id, xchg_type);
+		break;
+	}
+
+	if (likely(xchg)) {
+		xchg->ox_id = INVALID_VALUE16;
+		xchg->abort_oxid = INVALID_VALUE16;
+		xchg->rx_id = INVALID_VALUE16;
+		xchg->debug_hook = UNF_FALSE;
+		xchg->alloc_jif = jiffies;
+
+		atomic_set(&xchg->ref_cnt, 1);
+		atomic_set(&xchg->esgl_cnt, 0);
+	} else {
+		goto try_next_mgr;
+	}
+
+	return xchg;
+}
+
+static void unf_free_xchg(void *v_lport, void *v_xchg)
+{
+	struct unf_xchg_s *xchg = NULL;
+
+	UNF_REFERNCE_VAR(v_lport);
+	UNF_CHECK_VALID(0x958, UNF_TRUE, (v_xchg), return);
+
+	xchg = (struct unf_xchg_s *)v_xchg;
+	unf_xchg_ref_dec(xchg, XCHG_FREE_XCHG);
+}
+
+void unf_release_xchg_mgr_temp(struct unf_lport_s *v_lport)
+{
+	UNF_CHECK_VALID(0x960, UNF_TRUE, v_lport, return);
+
+	if (v_lport->dirty_flag & UNF_LPORT_DIRTY_FLAG_XCHGMGR_DIRTY) {
+		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR,
+			  "Port(0x%x) has dirty exchanges, do not release the exchange manager template.",
+			  v_lport->port_id);
+
+		return;
+	}
+
+	memset(&v_lport->xchg_mgr_temp, 0,
+	       sizeof(struct unf_cm_xchg_mgr_template_s));
+
+	v_lport->destroy_step = UNF_LPORT_DESTROY_STEP_7_DESTROY_XCHG_MGR_TMP;
+}
+
+static void unf_xchg_abort_all_sfs_xchg(struct unf_lport_s *v_lport,
+					int v_clean)
+{
+	struct unf_xchg_hot_pool_s *hot_pool = NULL;
+	struct list_head *xchg_node = NULL;
+	struct list_head *next_xchg_node = NULL;
+	struct unf_xchg_s *xchg = NULL;
+	unsigned long pool_lock_flags = 0;
+	unsigned long xchg_lock_flags = 0;
+	unsigned int i = 0;
+
+	UNF_CHECK_VALID(0x961, UNF_TRUE, v_lport, return);
+	for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
+		hot_pool = unf_get_hot_pool_by_lport(v_lport, i);
+		if (unlikely(!hot_pool)) {
+			UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT,
+				  UNF_MAJOR,
+				  "Port(0x%x) Hot Pool is NULL.",
+				  v_lport->port_id);
+
+			continue;
+		}
+
+		if (v_clean == UNF_FALSE) {
+			spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock,
+					  pool_lock_flags);
+
+			/* Clearing the SFS_Busy_list Exchange Resource */
+			list_for_each_safe(xchg_node, next_xchg_node,
+					   &hot_pool->sfs_busylist) {
+				xchg = list_entry(xchg_node, struct unf_xchg_s,
+						  list_xchg_entry);
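+				/* Mark only exchanges that still hold
+				 * a reference; already-freed ones are
+				 * skipped
+				 */
+				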
spin_lock_irqsave(&xchg->xchg_state_lock, + xchg_lock_flags); + if (atomic_read(&xchg->ref_cnt) > 0) + xchg->io_state |= TGT_IO_STATE_ABORT; + spin_unlock_irqrestore(&xchg->xchg_state_lock, + xchg_lock_flags); + } + + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + } else { + continue; + } + } +} + +static void unf_xchg_abort_ini_io_xchg(struct unf_lport_s *v_lport, + int v_clean) +{ + /* Clean L_Port/V_Port Link Down I/O: Abort */ + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct list_head *xchg_node = NULL; + struct list_head *next_xchg_node = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned long pool_lock_flags = 0; + unsigned long xchg_lock_flags = 0; + unsigned int io_state = 0; + unsigned int i = 0; + + UNF_CHECK_VALID(0x962, UNF_TRUE, (v_lport), return); + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + hot_pool = unf_get_hot_pool_by_lport(v_lport, i); + if (unlikely(!hot_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, + UNF_WARN, + "[warn]Port(0x%x) hot pool is NULL", + v_lport->port_id); + + continue; + } + + if (v_clean == UNF_FALSE) { + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + + /* 1. Abort INI_Busy_List IO */ + list_for_each_safe(xchg_node, next_xchg_node, + &hot_pool->ini_busylist) { + xchg = list_entry(xchg_node, struct unf_xchg_s, + list_xchg_entry); + spin_lock_irqsave(&xchg->xchg_state_lock, + xchg_lock_flags); + if (atomic_read(&xchg->ref_cnt) > 0) + xchg->io_state |= + INI_IO_STATE_DRABORT | io_state; + spin_unlock_irqrestore(&xchg->xchg_state_lock, + xchg_lock_flags); + } + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + } else { + /* Do nothing, just return */ + continue; + } + } +} + +static void unf_xchg_abort_all_xchg(void *v_lport, + unsigned int v_xchg_type, + int v_clean) +{ + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x964, UNF_TRUE, v_lport, return); + lport = (struct unf_lport_s *)v_lport; + + switch (v_xchg_type) { + case UNF_XCHG_TYPE_SFS: + unf_xchg_abort_all_sfs_xchg(lport, v_clean); + break; + + /* Clean L_Port/V_Port Link Down I/O: Abort */ + case UNF_XCHG_TYPE_INI: + unf_xchg_abort_ini_io_xchg(lport, v_clean); + break; + + default: + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) unknown exch type(0x%x)", + lport->port_id, v_xchg_type); + break; + } +} + +static void unf_xchg_abort_ini_send_tm_cmd(void *v_lport, + void *v_rport, + unsigned long long v_lun_id) +{ + /* + * LUN Reset: set UP_ABORT tag, with: + * INI_Busy_list, IO_Wait_list, + * IO_Delay_list, IO_Delay_transfer_list + */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned long flags = 0; + unsigned long xchg_flag = 0; + unsigned int i = 0; + unsigned long long raw_lunid = 0; + + UNF_CHECK_VALID(0x981, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x981, UNF_TRUE, v_rport, return); + + lport = ((struct unf_lport_s *)v_lport)->root_lport; + UNF_CHECK_VALID(0x982, UNF_TRUE, (lport), return); + rport = (struct unf_rport_s *)v_rport; + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + hot_pool = unf_get_hot_pool_by_lport(lport, i); + if (unlikely(!hot_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) hot pool is NULL", + lport->port_id); + continue; + } + + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags); + + /* 1. 
for each exchange from busy list */ + list_for_each_safe(node, next_node, + &hot_pool->ini_busylist) { + xchg = list_entry(node, struct unf_xchg_s, + list_xchg_entry); + + raw_lunid = *(unsigned long long *) + (xchg->fcp_cmnd.lun) >> 16 & + 0x000000000000ffff; + if ((v_lun_id == raw_lunid) && + (rport == xchg->rport)) { + spin_lock_irqsave(&xchg->xchg_state_lock, + xchg_flag); + xchg->io_state |= INI_IO_STATE_TMF_ABORT; + spin_unlock_irqrestore(&xchg->xchg_state_lock, + xchg_flag); + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, + UNF_MAJOR, + "[info]Exchange(%p) state(0x%x) S_ID(0x%x) D_ID(0x%x) tag(0x%x) abort by TMF CMD", + xchg, xchg->io_state, lport->nport_id, + rport->nport_id, xchg->hot_pool_tag); + } + } + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags); + } +} + +static void unf_xchg_abort_by_lun(void *v_lport, + void *v_rport, + unsigned long long v_lun_id, + void *v_tm_xchg, + int v_abort_all_lun_flag) +{ + /* ABORT: set UP_ABORT tag for target LUN I/O */ + struct unf_xchg_s *tm_xchg = (struct unf_xchg_s *)v_tm_xchg; + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[event]Port(0x%x) LUN_ID(0x%llx) TM_EXCH(0x%p) flag(%d)", + ((struct unf_lport_s *)v_lport)->port_id, + v_lun_id, v_tm_xchg, v_abort_all_lun_flag); + + /* for INI Mode */ + if (!tm_xchg) { + /* + * LUN Reset: set UP_ABORT tag, with: + * INI_Busy_list, IO_Wait_list, + * IO_Delay_list, IO_Delay_transfer_list + */ + unf_xchg_abort_ini_send_tm_cmd(v_lport, v_rport, v_lun_id); + + return; + } +} + +static void unf_xchg_abort_ini_tmf_target_reset(void *v_lport, void *v_rport) +{ + /* + * LUN Reset: set UP_ABORT tag, with: + * INI_Busy_list, IO_Wait_list, + * IO_Delay_list, IO_Delay_transfer_list + */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned long flags = 0; + unsigned long xchg_flag = 0; + unsigned int i = 0; + + UNF_CHECK_VALID(0x981, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x981, UNF_TRUE, v_rport, return); + + lport = ((struct unf_lport_s *)v_lport)->root_lport; + UNF_CHECK_VALID(0x982, UNF_TRUE, (lport), return); + rport = (struct unf_rport_s *)v_rport; + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + hot_pool = unf_get_hot_pool_by_lport(lport, i); + if (unlikely(!hot_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR, + "[err]Port(0x%x) hot pool is NULL", + lport->port_id); + continue; + } + + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags); + + /* 1. 
for each exchange from busy_list */ + list_for_each_safe(node, next_node, + &hot_pool->ini_busylist) { + xchg = list_entry(node, struct unf_xchg_s, + list_xchg_entry); + if (rport == xchg->rport) { + spin_lock_irqsave(&xchg->xchg_state_lock, + xchg_flag); + xchg->io_state |= INI_IO_STATE_TMF_ABORT; + spin_unlock_irqrestore(&xchg->xchg_state_lock, + xchg_flag); + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, + UNF_MAJOR, + "[info]Exchange(%p) state(0x%x) S_ID(0x%x) D_ID(0x%x) tag(0x%x) abort by TMF CMD", + xchg, xchg->io_state, + lport->nport_id, + rport->nport_id, xchg->hot_pool_tag); + } + } + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags); + } +} + +static void unf_xchg_abort_by_session(void *v_lport, void *v_rport) +{ + /* + * LUN Reset: set UP_ABORT tag, with: + * INI_Busy_list, IO_Wait_list, + * IO_Delay_list, IO_Delay_transfer_list + */ + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[event]Port(0x%x) Rport(0x%x) start session reset with TMF", + ((struct unf_lport_s *)v_lport)->port_id, + ((struct unf_rport_s *)v_rport)->nport_id); + + unf_xchg_abort_ini_tmf_target_reset(v_lport, v_rport); +} + +static void unf_ini_busy_io_xchg_abort(void *v_hot_pool, void *v_rport, + unsigned int v_sid, unsigned int v_did, + unsigned int v_extra_io_state) +{ + /* + * for target session: Set (DRV) ABORT + * 1. R_Port remove + * 2. Send PLOGI_ACC callback + * 3. RCVD PLOGI + * 4. RCVD LOGO + */ + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct unf_xchg_s *xchg = NULL; + struct list_head *xchg_node = NULL; + struct list_head *next_xchg_node = NULL; + struct unf_rport_s *rport = NULL; + unsigned long xchg_lock_flags = 0; + + rport = (struct unf_rport_s *)v_rport; + hot_pool = (struct unf_xchg_hot_pool_s *)v_hot_pool; + + /* ABORT INI IO: INI_BUSY_LIST */ + list_for_each_safe(xchg_node, next_xchg_node, + &hot_pool->ini_busylist) { + xchg = list_entry(xchg_node, struct unf_xchg_s, + list_xchg_entry); + + spin_lock_irqsave(&xchg->xchg_state_lock, xchg_lock_flags); + if ((v_did == xchg->did) && (v_sid == xchg->sid) && + (rport == xchg->rport) && + (atomic_read(&xchg->ref_cnt) > 0)) { + xchg->scsi_cmnd_info.result = + UNF_SCSI_HOST(DID_IMM_RETRY); + xchg->io_state |= INI_IO_STATE_DRABORT; + xchg->io_state |= v_extra_io_state; + + UNF_TRACE(UNF_EVTLOG_IO_INFO, + UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]Abort INI:0x%p, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, %llu.", + xchg, + (unsigned int)xchg->hot_pool_tag, + (unsigned int)xchg->xchg_type, + (unsigned int)xchg->ox_id, + (unsigned int)xchg->rx_id, + (unsigned int)xchg->sid, + (unsigned int)xchg->did, + (unsigned int)xchg->io_state, + atomic_read(&xchg->ref_cnt), + xchg->alloc_jif); + } + spin_unlock_irqrestore(&xchg->xchg_state_lock, + xchg_lock_flags); + } +} + +static void unf_xchg_mgr_io_xchg_abort(void *v_lport, void *v_rport, + unsigned int v_sid, unsigned int v_did, + unsigned int v_extra_io_state) +{ + /* + * for target session: set ABORT + * 1. R_Port remove + * 2. Send PLOGI_ACC callback + * 3. RCVD PLOGI + * 4. 
RCVD LOGO + */ + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct unf_lport_s *lport = NULL; + unsigned long pool_lock_flags = 0; + unsigned int i = 0; + + UNF_CHECK_VALID(0x983, UNF_TRUE, v_lport, return); + lport = ((struct unf_lport_s *)v_lport)->root_lport; + UNF_CHECK_VALID(0x984, UNF_TRUE, lport, return); + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + hot_pool = unf_get_hot_pool_by_lport(lport, i); + if (unlikely(!hot_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, + UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) hot pool is NULL", + lport->port_id); + + continue; + } + + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + + /* 1. Clear INI (session) IO: INI Mode */ + unf_ini_busy_io_xchg_abort(hot_pool, v_rport, v_sid, + v_did, v_extra_io_state); + + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + } +} + +static void unf_xchg_mgr_sfs_xchg_abort(void *v_lport, void *v_rport, + unsigned int v_sid, unsigned int v_did) +{ + struct unf_xchg_hot_pool_s *hot_pool = NULL; + struct list_head *xchg_node = NULL; + struct list_head *next_xchg_node = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + unsigned long pool_lock_flags = 0; + unsigned long xchg_lock_flags = 0; + unsigned int i = 0; + + UNF_CHECK_VALID(0x991, UNF_TRUE, (v_lport), return); + + lport = ((struct unf_lport_s *)v_lport)->root_lport; + UNF_CHECK_VALID(0x992, UNF_TRUE, (lport), return); + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + hot_pool = unf_get_hot_pool_by_lport(lport, i); + if (!hot_pool) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, + UNF_LOG_IO_ATT, UNF_MAJOR, + "Port(0x%x) Hot Pool is NULL.", + lport->port_id); + + continue; + } + + rport = (struct unf_rport_s *)v_rport; + + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + + /* Clear the SFS exchange of the corresponding connection */ + list_for_each_safe(xchg_node, next_xchg_node, + &hot_pool->sfs_busylist) { + xchg = list_entry(xchg_node, struct unf_xchg_s, + list_xchg_entry); + + spin_lock_irqsave(&xchg->xchg_state_lock, + xchg_lock_flags); + if ((v_did == xchg->did) && (v_sid == xchg->sid) && + (rport == xchg->rport) && + (atomic_read(&xchg->ref_cnt) > 0)) { + xchg->io_state |= TGT_IO_STATE_ABORT; + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, + UNF_MAJOR, + "Abort SFS:0x%p---0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----%llu.", + xchg, + (unsigned int)xchg->hot_pool_tag, + (unsigned int)xchg->xchg_type, + (unsigned int)xchg->ox_id, + (unsigned int)xchg->rx_id, + (unsigned int)xchg->sid, + (unsigned int)xchg->did, + (unsigned int)xchg->io_state, + atomic_read(&xchg->ref_cnt), + xchg->alloc_jif); + } + spin_unlock_irqrestore(&xchg->xchg_state_lock, + xchg_lock_flags); + } + + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + } +} + +unsigned int unf_init_xchg_mgr_temp(struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x959, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + v_lport->xchg_mgr_temp.pfn_unf_xchg_get_free_and_init = + unf_get_new_xchg; + v_lport->xchg_mgr_temp.pfn_unf_xchg_release = unf_free_xchg; + v_lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag = + unf_lookup_xchg_by_tag; + v_lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_id = + unf_find_xchg_by_oxid; + v_lport->xchg_mgr_temp.pfn_unf_xchg_add_timer = + unf_fc_xchg_add_timer; + v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer = + unf_xchg_cancel_timer; + v_lport->xchg_mgr_temp.pfn_unf_xchg_abort_all_io = + unf_xchg_abort_all_xchg; + 
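+	/* Lookup and abort helpers used by TMF and session cleanup */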
+	v_lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_cmnd_sn =
+		unf_lookup_xchg_by_cmnd_sn;
+	v_lport->xchg_mgr_temp.pfn_unf_xchg_abort_by_lun =
+		unf_xchg_abort_by_lun;
+	v_lport->xchg_mgr_temp.pfn_unf_xchg_abort_by_session =
+		unf_xchg_abort_by_session;
+	v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_io_xchg_abort =
+		unf_xchg_mgr_io_xchg_abort;
+	v_lport->xchg_mgr_temp.pfn_unf_xchg_mgr_sfs_xchg_abort =
+		unf_xchg_mgr_sfs_xchg_abort;
+
+	return RETURN_OK;
+}
+
+void unf_set_hot_pool_wait_state(struct unf_lport_s *v_lport,
+				 enum int_e v_wait_state)
+{
+	struct unf_xchg_hot_pool_s *hot_pool = NULL;
+	unsigned long pool_lock_flags = 0;
+	unsigned int i = 0;
+
+	UNF_CHECK_VALID(0x965, UNF_TRUE, v_lport, return);
+
+	for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
+		hot_pool = unf_get_hot_pool_by_lport(v_lport, i);
+		if (unlikely(!hot_pool)) {
+			UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT,
+				  UNF_WARN,
+				  "[warn]Port(0x%x) hot pool is NULL",
+				  v_lport->port_id);
+			continue;
+		}
+
+		spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock,
+				  pool_lock_flags);
+		hot_pool->wait_state = v_wait_state;
+		spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock,
+				       pool_lock_flags);
+	}
+}
+
+unsigned int unf_xchg_ref_inc(struct unf_xchg_s *v_xchg,
+			      enum unf_ioflow_id_e v_io_stage)
+{
+	struct unf_xchg_hot_pool_s *hot_pool = NULL;
+	unsigned long flags = 0;
+	unsigned int ret = UNF_RETURN_ERROR;
+
+	UNF_CHECK_VALID(0x967, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR);
+
+	if (unlikely(v_xchg->debug_hook == UNF_TRUE)) {
+		UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
+			  "[info]Xchg(0x%p) State(0x%x) SID_DID(0x%x_0x%x) OX_ID_RX_ID(0x%x_0x%x) AllocJiff(%llu) Refcnt(%d) Stage(%s)",
+			  v_xchg, v_xchg->io_state, v_xchg->sid,
+			  v_xchg->did, v_xchg->ox_id, v_xchg->rx_id,
+			  v_xchg->alloc_jif, atomic_read(&v_xchg->ref_cnt),
+			  io_stage[v_io_stage].stage);
+	}
+
+	hot_pool = v_xchg->hot_pool;
+	UNF_CHECK_VALID(0x968, UNF_TRUE, hot_pool, return UNF_RETURN_ERROR);
+	UNF_REFERNCE_VAR(v_io_stage);
+
+	/* Exchange -> Hot Pool Tag check */
+	if (unlikely((v_xchg->hot_pool_tag >=
+		      (hot_pool->slab_total_sum + hot_pool->base)) ||
+		     (v_xchg->hot_pool_tag < hot_pool->base))) {
+		UNF_TRACE(UNF_EVTLOG_IO_ERR, UNF_LOG_IO_ATT, UNF_ERR,
+			  "[err]Xchg(0x%p) S_ID(0x%x) D_ID(0x%x) hot_pool_tag(0x%x) is out of range, slab total num(0x%x) base(0x%x)",
+			  v_xchg, v_xchg->sid, v_xchg->did,
+			  v_xchg->hot_pool_tag,
+			  hot_pool->slab_total_sum + hot_pool->base,
+			  hot_pool->base);
+
+		return UNF_RETURN_ERROR;
+	}
+
+	/* atomic read & inc */
+	spin_lock_irqsave(&v_xchg->xchg_state_lock, flags);
+	if (unlikely(atomic_read(&v_xchg->ref_cnt) <= 0)) {
+		ret = UNF_RETURN_ERROR;
+	} else {
+		if (unf_get_xchg_by_xchg_tag(hot_pool,
+					     v_xchg->hot_pool_tag -
+					     hot_pool->base) ==
+		    v_xchg) {
+			atomic_inc(&v_xchg->ref_cnt);
+			ret = RETURN_OK;
+		} else {
+			ret = UNF_RETURN_ERROR;
+		}
+	}
+	spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags);
+
+	return ret;
+}
+
+void unf_xchg_ref_dec(struct unf_xchg_s *v_xchg,
+		      enum unf_ioflow_id_e v_io_stage)
+{
+	/* Atomic dec ref_cnt & test, free exchange
+	 * if necessary (ref_cnt==0)
+	 */
+	struct unf_xchg_hot_pool_s *hot_pool = NULL;
+	void (*pfn_free_xchg)(struct unf_xchg_s *) = NULL;
+	unsigned long flags = 0;
+	unsigned long xchg_lock_flags = 0;
+
+	UNF_CHECK_VALID(0x969, UNF_TRUE, (v_xchg), return);
+
+	if (v_xchg->debug_hook == UNF_TRUE) {
+		UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR,
+			  "[info]Xchg(0x%p) State(0x%x) SID_DID(0x%x_0x%x) OXID_RXID(0x%x_0x%x) AllocJiff(%llu) Refcnt(%d) Stage %s",
+			  v_xchg,
v_xchg->io_state, v_xchg->sid, + v_xchg->did, v_xchg->ox_id, v_xchg->rx_id, + v_xchg->alloc_jif, atomic_read(&v_xchg->ref_cnt), + io_stage[v_io_stage].stage); + } + + hot_pool = v_xchg->hot_pool; + UNF_CHECK_VALID(0x970, UNF_TRUE, hot_pool, return); + UNF_CHECK_VALID(0x970, UNF_TRUE, + v_xchg->hot_pool_tag >= hot_pool->base, return); + UNF_REFERNCE_VAR(v_io_stage); + + /* + * 1. Atomic dec & test + * 2. Free exchange if necessary (ref_cnt == 0) + */ + spin_lock_irqsave(&v_xchg->xchg_state_lock, xchg_lock_flags); + if (atomic_dec_and_test(&v_xchg->ref_cnt)) { + pfn_free_xchg = v_xchg->pfn_free_xchg; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, + xchg_lock_flags); + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags); + unf_hot_pool_slab_set(hot_pool, + v_xchg->hot_pool_tag - hot_pool->base, + NULL); + /* Delete exchange list entry */ + list_del_init(&v_xchg->list_xchg_entry); + hot_pool->total_xchges--; + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags); + + // unf_free_fcp_xchg --->>> unf_done_ini_xchg + if (pfn_free_xchg) + pfn_free_xchg(v_xchg); + } else { + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, + xchg_lock_flags); + } +} + +bool unf_busy_io_completed(struct unf_lport_s *v_lport) +{ + struct unf_xchg_mgr_s *xchg_mgr = NULL; + unsigned long pool_lock_flags = 0; + unsigned int i; + + UNF_CHECK_VALID(0x5841, UNF_TRUE, v_lport, return UNF_TRUE); + + for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { + xchg_mgr = unf_get_xchg_mgr_by_lport(v_lport, i); + if (unlikely(!xchg_mgr)) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) Exchange Manager is NULL", + v_lport->port_id); + continue; + } + + spin_lock_irqsave(&xchg_mgr->hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + if (!list_empty(&xchg_mgr->hot_pool->ini_busylist)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, + UNF_INFO, "[info]Port(0x%x) ini busylist is not empty.", + v_lport->port_id); + + spin_unlock_irqrestore( + &xchg_mgr->hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + return UNF_FALSE; + } + spin_unlock_irqrestore( + &xchg_mgr->hot_pool->xchg_hot_pool_lock, + pool_lock_flags); + } + return UNF_TRUE; +} diff --git a/drivers/scsi/huawei/hifc/unf_exchg.h b/drivers/scsi/huawei/hifc/unf_exchg.h new file mode 100644 index 000000000000..fa24cd986654 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_exchg.h @@ -0,0 +1,513 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __UNF_FCEXCH_H__ +#define __UNF_FCEXCH_H__ + +#include "unf_scsi_common.h" +#include "unf_lport.h" + +#define DRV_VERIFY_CRC_MASK (1 << 1) +#define DRV_VERIFY_APP_MASK (1 << 2) +#define DRV_VERIFY_LBA_MASK (1 << 3) + +#define DRV_DIF_CRC_POS 0 +#define DRV_DIF_CRC_LEN 2 +#define DRV_DIF_APP_POS 2 +#define DRV_DIF_APP_LEN 2 +#define DRV_DIF_LBA_POS 4 +#define DRV_DIF_LBA_LEN 4 + +enum unf_ioflow_id_e { + XCHG_ALLOC = 0, + TGT_RECEIVE_ABTS, + TGT_ABTS_DONE, + TGT_IO_SRR, + SFS_RESPONSE, + SFS_TIMEOUT, + INI_SEND_CMND, + INI_RESPONSE_DONE, + INI_EH_ABORT, + INI_EH_DEVICE_RESET, + INI_EH_BLS_DONE, + INI_IO_TIMEOUT, + INI_REQ_TIMEOUT, + XCHG_CANCEL_TIMER, + XCHG_FREE_XCHG, + SEND_ELS, + IO_XCHG_WAIT, + XCHG_BUTT +}; + +enum unf_xchg_type_e { + UNF_XCHG_TYPE_INI = 0, /* INI IO */ + UNF_XCHG_TYPE_SFS = 1, /* SFS IO */ + UNF_XCHG_TYPE_INVALID +}; + +enum unf_xchg_mgr_type_e { + UNF_XCHG_MGR_TYPE_RANDOM = 0, + UNF_XCHG_MGR_TYPE_FIXED = 1, + UNF_XCHG_MGR_TYPE_INVALID +}; + +enum tgt_io_xchg_send_stage_e { + 
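+	/* Progress of posting an xfer/rsp message to the hardware queue */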
+	TGT_IO_SEND_STAGE_NONE = 0,
+	TGT_IO_SEND_STAGE_DOING = 1, /* xfer/rsp into queue */
+	TGT_IO_SEND_STAGE_DONE = 2, /* xfer/rsp into queue complete */
+	TGT_IO_SEND_STAGE_ECHO = 3, /* driver handled TSTS */
+	TGT_IO_SEND_STAGE_INVALID
+};
+
+enum tgt_io_send_result_e {
+	TGT_IO_SEND_RESULT_OK = 0, /* xfer/rsp enqueue succeed */
+	TGT_IO_SEND_RESULT_FAIL = 1, /* xfer/rsp enqueue fail */
+	TGT_IO_SEND_RESULT_INVALID
+};
+
+struct unf_ioflow_id_s {
+	char *stage;
+};
+
+#define UNF_CHECK_OXID_MATCHED(v_oxid, v_oid, xchg) \
+	((v_oxid == xchg->ox_id) && (v_oid == xchg->oid) && \
+	 (atomic_read(&xchg->ref_cnt) > 0))
+
+#define UNF_CHECK_ALLOCTIME_VALID(lport, xchg_tag, exchg, pkg_alloc_time, \
+				  xchg_alloc_time) \
+	do { \
+		if (unlikely((pkg_alloc_time != 0) && \
+			     (pkg_alloc_time != xchg_alloc_time))) { \
+			UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_NORMAL, \
+				  UNF_ERR, \
+				  "Lport(0x%x_0x%x_0x%x_0x%p) AllocTime is not equal, PKG AllocTime:0x%x, Exchg AllocTime:0x%x", \
+				  lport->port_id, lport->nport_id, \
+				  xchg_tag, exchg, \
+				  pkg_alloc_time, xchg_alloc_time); \
+			return UNF_RETURN_ERROR; \
+		}; \
+		if (unlikely(pkg_alloc_time == 0)) { \
+			UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, \
+				  UNF_MAJOR, \
+				  "Lport(0x%x_0x%x_0x%x_0x%p) pkgtime err, PKG AllocTime:0x%x, Exchg AllocTime:0x%x", \
+				  lport->port_id, lport->nport_id, \
+				  xchg_tag, exchg, \
+				  pkg_alloc_time, xchg_alloc_time); \
+		}; \
+	} while (0)
+
+#define UNF_GET_DIF_ERROR_LEVEL1(v_xchg, dif_control, check_err_code, \
+				 tgt_err_code, default_err_code) \
+	do { \
+		if (DRV_VERIFY_CRC_MASK & \
+		    v_xchg->dif_control.protect_opcode) { \
+			if (memcmp(&dif_control->actual_dif[DRV_DIF_CRC_POS], \
+				   &dif_control->expected_dif[DRV_DIF_CRC_POS], \
+				   DRV_DIF_CRC_LEN) != 0) { \
+				tgt_err_code = default_err_code; \
+			} \
+		} \
+	} while (0)
+
+#define UNF_GET_DIF_ERROR_LEVEL2(v_xchg, dif_control, check_err_code, \
+				 tgt_err_code, default_err_code) \
+	do { \
+		if ((check_err_code == tgt_err_code) && \
+		    (DRV_VERIFY_LBA_MASK & v_xchg->dif_control.protect_opcode)) { \
+			if (memcmp(&dif_control->actual_dif[DRV_DIF_LBA_POS], \
+				   &dif_control->expected_dif[DRV_DIF_LBA_POS], \
+				   DRV_DIF_LBA_LEN) != 0) { \
+				tgt_err_code = default_err_code; \
+			} \
+		} \
+	} while (0)
+
+#define UNF_GET_DIF_ERROR_LEVEL3(v_xchg, dif_control, check_err_code, \
+				 tgt_err_code, default_err_code) \
+	UNF_GET_DIF_ERROR_LEVEL2(v_xchg, dif_control, check_err_code, \
+				 tgt_err_code, default_err_code)
+
+#define UNF_SET_SCSI_CMND_RESULT(v_xchg, v_result) \
+	((v_xchg)->scsi_cmnd_info.result = (v_result))
+
+#define UNF_GET_GS_SFS_XCHG_TIMER(v_lport) (3 * \
+	(unsigned long)(v_lport)->ra_tov)
+
+#define UNF_GET_BLS_SFS_XCHG_TIMER(v_lport) (2 * \
+	(unsigned long)(v_lport)->ra_tov)
+
+#define UNF_GET_ELS_SFS_XCHG_TIMER(v_lport) (2 * \
+	(unsigned long)(v_lport)->ra_tov)
+
+#define UNF_XCHG_MGR_FC 0
+#define UNF_XCHG_MIN_XID 0x0000
+#define UNF_XCHG_MAX_XID 0xffff
+#define UNF_ELS_ECHO_RESULT_OK 0
+#define UNF_ELS_ECHO_RESULT_FAIL 1
+
+struct unf_xchg_s;
+/* Xchg hot pool, busy IO lookup Xchg */
+struct unf_xchg_hot_pool_s {
+	/* Xchg sum, in hot pool */
+	unsigned short total_xchges;
+	/* Total number of resources consumed, corresponding to buffers */
+	unsigned int total_res_cnt;
+	enum int_e wait_state;
+
+	/* pool lock */
+	spinlock_t xchg_hot_pool_lock;
+
+	/* Xchg position lists */
+	struct list_head sfs_busylist;
+	struct list_head ini_busylist;
+	struct list_head list_destroy_xchg;
+
+	/* Next free hot point */
+	unsigned short slab_next_index;
+	unsigned short slab_total_sum;
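+	/* First hot pool tag of this pool: tag = base + slab index */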
+	unsigned short base;
+
+	struct unf_lport_s *lport;
+
+	struct unf_xchg_s *xchg_slab[0];
+
+};
+
+/* Free pool of Xchg */
+struct unf_xchg_free_pool_s {
+	spinlock_t xchg_free_pool_lock;
+
+	unsigned int fcp_xchg_sum;
+
+	/* IO used Xchg */
+	struct list_head list_free_xchg_list;
+	unsigned int total_fcp_xchg;
+
+	/* SFS used Xchg */
+	struct list_head list_sfs_xchg_list;
+	unsigned int total_sfs_xchg;
+	unsigned int sfs_xchg_sum;
+
+	struct completion *xchg_mgr_completion;
+};
+
+struct unf_big_sfs_s {
+	struct list_head entry_big_sfs;
+	void *vaddr;
+	unsigned int size;
+};
+
+struct unf_big_sfs_pool_s {
+	void *big_sfs_pool;
+	unsigned int free_count;
+	struct list_head list_free_pool;
+	struct list_head list_busy_pool;
+	spinlock_t big_sfs_pool_lock;
+};
+
+/* Xchg Manager for vport Xchg */
+struct unf_xchg_mgr_s {
+	/* Mgr type */
+	unsigned int mgr_type;
+
+	/* Mgr entry */
+	struct list_head xchg_mgr_entry;
+
+	/* Mgr attributes */
+	unsigned short min_xid;
+	unsigned short max_xid;
+	unsigned int mem_size;
+
+	/* Mgr allocated resources */
+	void *fcp_mm_start;
+
+	unsigned int sfs_mem_size;
+	void *sfs_mm_start;
+	dma_addr_t sfs_phy_addr;
+
+	struct unf_xchg_free_pool_s free_pool;
+	struct unf_xchg_hot_pool_s *hot_pool;
+
+	struct unf_big_sfs_pool_s st_big_sfs_pool;
+
+	struct buf_describe_s big_sfs_buf_list;
+	struct buf_describe_s rsp_buf_list;
+
+};
+
+struct unf_seq_s {
+	/* Seq ID */
+	unsigned char seq_id;
+
+	/* Seq Cnt */
+	unsigned short seq_cnt;
+
+	/* Seq state and len, maybe used for FCoE */
+	unsigned short seq_stat;
+	unsigned int rec_data_len;
+};
+
+union unf_xchg_fcp_sfs_u {
+	struct unf_sfs_entry_s sfs_entry;
+	struct unf_fcp_rsp_iu_entry_s fcp_rsp_entry;
+};
+
+#define UNF_IO_STATE_NEW 0
+#define TGT_IO_STATE_SEND_XFERRDY (1 << 2) /* XFER_RDY sent successfully */
+#define TGT_IO_STATE_RSP (1 << 5) /* chip send rsp */
+#define TGT_IO_STATE_ABORT (1 << 7)
+
+/* INI Upper-layer Task Management Commands */
+#define INI_IO_STATE_UPTASK (1 << 15)
+/* INI Upper-layer timeout Abort flag */
+#define INI_IO_STATE_UPABORT (1 << 16)
+#define INI_IO_STATE_DRABORT (1 << 17) /* INI driver Abort flag */
+#define INI_IO_STATE_DONE (1 << 18) /* INI complete flag */
+#define INI_IO_STATE_WAIT_RRQ (1 << 19) /* INI wait send rrq */
+#define INI_IO_STATE_UPSEND_ERR (1 << 20) /* INI send fail flag */
+/* INI only clear firmware resource flag */
+#define INI_IO_STATE_ABORT_RESOURCE (1 << 21)
+/* IOC abort: INI sent ABTS; set when the 5s timeout semaphore expires */
+#define INI_IO_STATE_ABORT_TIMEOUT (1 << 22)
+#define INI_IO_STATE_RRQSEND_ERR (1 << 23) /* INI send RRQ fail flag */
+/* INI busy IO session logo status */
+#define INI_IO_STATE_LOGO (1 << 24)
+#define INI_IO_STATE_TMF_ABORT (1 << 25) /* INI TMF ABORT IO flag */
+#define INI_IO_STATE_REC_TIMEOUT_WAIT (1 << 26) /* INI REC TIMEOUT WAIT */
+#define INI_IO_STATE_REC_TIMEOUT (1 << 27) /* INI REC TIMEOUT */
+
+#define TMF_RESPONSE_RECEIVED (1 << 0)
+#define MARKER_STS_RECEIVED (1 << 1)
+#define ABTS_RESPONSE_RECEIVED (1 << 2)
+
+struct unf_scsi_cmd_info_s {
+	unsigned long time_out;
+	unsigned long abort_timeout;
+	void *scsi_cmnd;
+	void (*pfn_done)(struct unf_scsi_cmd_s *);
+	ini_get_sgl_entry_buf pfn_unf_get_sgl_entry_buf;
+	struct unf_ini_error_code_s *err_code_table; /* error code table */
+	char *sense_buf;
+	unsigned int err_code_table_cout; /* Size of the error code table */
+	unsigned int buf_len;
+	unsigned int entry_cnt;
+	unsigned int result; /* Stores command execution results */
+	unsigned int port_id;
+	/* Re-search for rport based on 
scsi_id during retry. Otherwise,
+	 * data inconsistency will occur
+	 */
+	unsigned int scsi_id;
+	void *sgl;
+};
+
+struct unf_req_sgl_info_s {
+	void *sgl;
+	void *sgl_start;
+	unsigned int req_index;
+	unsigned int entry_index;
+};
+
+struct unf_els_echo_info_s {
+	unsigned long long response_time;
+	struct semaphore echo_sync_sema;
+	unsigned int echo_result;
+};
+
+struct unf_xchg_s {
+	/* Mgr resource related */
+	/* list delete from HotPool */
+	struct unf_xchg_hot_pool_s *hot_pool;
+
+	/* attach to FreePool */
+	struct unf_xchg_free_pool_s *free_pool;
+	struct unf_xchg_mgr_s *xchg_mgr;
+	struct unf_lport_s *lport; /* Local LPort/VLPort */
+	struct unf_rport_s *rport; /* Remote Port */
+	struct unf_rport_s *disc_rport; /* Discover Remote Port */
+	struct list_head list_xchg_entry;
+	struct list_head list_abort_xchg_entry;
+	spinlock_t xchg_state_lock;
+
+	/* Xchg reference */
+	atomic_t ref_cnt;
+	atomic_t esgl_cnt;
+	int debug_hook;
+	/* Xchg attributes */
+	unsigned short hot_pool_tag; /* Hot pool tag */
+	/* Only used for abort: ox_id for
+	 * lun reset/logo/plogi/linkdown is set to 0xffff
+	 */
+	unsigned short abort_oxid;
+	unsigned int xchg_type; /* LS, TGT CMND, REQ, or SCSI Cmnd */
+	unsigned short ox_id;
+	unsigned short rx_id;
+	unsigned int sid;
+	unsigned int did;
+	unsigned int oid; /* ID of the exchange initiator */
+	unsigned int disc_port_id; /* Send GNN_ID/GFF_ID NPortId */
+	unsigned char seq_id;
+	unsigned char byte_orders; /* Byte order */
+	struct unf_seq_s seq;
+
+	unsigned int cmnd_code;
+	unsigned int world_id;
+	/* Dif control */
+	struct unf_dif_control_info_s dif_control;
+	struct dif_info_s dif_info;
+
+	/* IO status: abort, timeout */
+	unsigned int io_state; /* TGT_IO_STATE_E */
+	unsigned int tmf_state; /* TMF STATE */
+	unsigned int ucode_abts_state;
+	unsigned int abts_state;
+
+	/* IO Enqueuing */
+	enum tgt_io_xchg_send_stage_e io_send_stage; /* TGT_IO_SEND_STAGE_E */
+
+	/* IO Enqueuing result, success or failure */
+	enum tgt_io_send_result_e io_send_result; /* TGT_IO_SEND_RESULT_E */
+
+	/* Whether ABORT is delivered to the chip for IO */
+	unsigned char io_send_abort;
+	/* Result of delivering ABORT to the chip
+	 * (success: UNF_TRUE; failure: UNF_FALSE)
+	 */
+	unsigned char io_abort_result;
+
+	/* for INI, indicates the length of the data
+	 * transmitted over the PCI link
+	 */
+	unsigned int data_len;
+
+	/* Residual length: greater than 0 means underflow,
+	 * less than 0 means overflow
+	 */
+	int resid_len;
+
+	/* +++++++++++++++++IO Special++++++++++++++++++++ */
+	/* point to tgt cmnd/req/scsi cmnd */
+	/* Fcp cmnd */
+	struct unf_fcp_cmnd_s fcp_cmnd;
+
+	struct unf_scsi_cmd_info_s scsi_cmnd_info;
+
+	struct unf_req_sgl_info_s req_sgl_info;
+
+	struct unf_req_sgl_info_s dif_sgl_info;
+
+	unsigned long long cmnd_sn;
+
+	/* timestamp */
+	unsigned long long start_jif;
+	unsigned long long alloc_jif;
+
+	unsigned long long io_front_jif;
+
+	/* I/O resources to be consumed, corresponding to buffers */
+	unsigned int may_consume_res_cnt;
+	/* Number of resources consumed by I/Os. 
The value is not zero + * only when it is sent to the chip + */ + unsigned int fact_consume_res_cnt; + + /* scsi req info */ + unsigned int data_direction; + + struct unf_big_sfs_s *big_sfs_buf; + + /* scsi cmnd sense_buffer pointer */ + union unf_xchg_fcp_sfs_u fcp_sfs_union; + + /* One exchange may use several External Sgls */ + struct list_head list_esgls; + + struct unf_els_echo_info_s echo_info; + + /* +++++++++++++++++Task Special++++++++++++++++++++ */ + struct semaphore task_sema; + + /* for RRQ ,IO Xchg add to SFS Xchg */ + void *io_xchg; + + /* Xchg delay work */ + struct delayed_work timeout_work; + + /* send result callback */ + void (*pfn_ob_callback)(struct unf_xchg_s *); + + /*Response IO callback */ + void (*pfn_callback)(void *v_lport, + void *v_rport, + void *v_xchg); + + /* Xchg release function */ + void (*pfn_free_xchg)(struct unf_xchg_s *); + + /* +++++++++++++++++low level Special++++++++++++++++++++ */ + unsigned int private[PKG_MAX_PRIVATE_DATA_SIZE]; + + /* ABTS_RSP info */ + struct unf_abts_rsps_s abts_rsps; + + unsigned long long rport_bind_jifs; + + /* sfs exchg ob callback status */ + unsigned int ob_callback_sts; + unsigned int scsi_id; + atomic_t delay_flag; + void *upper_ct; +}; + +struct unf_esgl_page_s *unf_get_one_free_esgl_page(struct unf_lport_s *v_lport, + struct unf_xchg_s *v_xchg); +void unf_release_xchg_mgr_temp(struct unf_lport_s *v_lport); +unsigned int unf_init_xchg_mgr_temp(struct unf_lport_s *v_lport); +unsigned int unf_alloc_xchg_resource(struct unf_lport_s *v_lport); +void unf_free_all_xchg_mgr(struct unf_lport_s *v_lport); +void unf_xchg_mgr_destroy(struct unf_lport_s *v_lport); +unsigned int unf_xchg_ref_inc(struct unf_xchg_s *v_xchg, + enum unf_ioflow_id_e v_io_stage); +void unf_xchg_ref_dec(struct unf_xchg_s *v_xchg, + enum unf_ioflow_id_e v_io_stage); +struct unf_xchg_mgr_s *unf_get_xchg_mgr_by_lport(struct unf_lport_s *v_lport, + unsigned int); +struct unf_xchg_hot_pool_s *unf_get_hot_pool_by_lport( + struct unf_lport_s *v_lport, unsigned int); +void unf_free_lport_ini_xchg(struct unf_xchg_mgr_s *v_xchg_mgr, + int v_done_ini_flag); +struct unf_xchg_s *unf_cm_lookup_xchg_by_cmnd_sn( + void *v_lport, + unsigned long long v_command_sn, + unsigned int v_world_id); +void *unf_cm_lookup_xchg_by_id(void *v_lport, unsigned short v_oxid, + unsigned int v_oid); +void unf_cm_xchg_abort_by_lun(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned long long v_lun_id, + void *v_tm_xchg, int v_abort_all_lun_flag); +void unf_cm_xchg_abort_by_session(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); + +void unf_cm_xchg_mgr_abort_io_by_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int v_sid, + unsigned int v_did, + unsigned int extra_io_stat); +void unf_cm_xchg_mgr_abort_sfs_by_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int v_sid, + unsigned int v_did); +void unf_cm_free_xchg(void *v_lport, void *v_xchg); +void *unf_cm_get_free_xchg(void *v_lport, unsigned int v_xchg_type); +void *unf_cm_lookup_xchg_by_tag(void *v_lport, unsigned short v_hot_pool_tag); +void unf_release_esgls(struct unf_xchg_s *v_xchg); +void unf_show_all_xchg(struct unf_lport_s *v_lport, + struct unf_xchg_mgr_s *v_xchg_mgr); +void unf_destroy_dirty_xchg(struct unf_lport_s *v_lport, int v_show_only); +void unf_wakeup_scsi_task_cmnd(struct unf_lport_s *v_lport); +void unf_set_hot_pool_wait_state(struct unf_lport_s *v_lport, + enum int_e v_wait_state); +void unf_free_lport_all_xchg(struct unf_lport_s 
+#define UNF_GID_CONTROL(v_nport_id) ((v_nport_id) >> 24) + +#define UNF_ECHO_PLD_DATA 0x1234567890ABCDEF +#define UNF_ECHO_REQ_SIZE 0 + +#define UNF_GET_PORT_OPTIONS(v_fc4feature) ((v_fc4feature) >> 20) + +#define UNF_GET_DOMAIN_ID(x) (((x) & 0xFF0000) >> 16) /* domain id 
*/ +#define UNF_GET_AREA_ID(x) (((x) & 0x00FF00) >> 8) /* area id */ + +#define UNF_SERVICE_GET_NPORTID_FORM_GID_PAGE(v_port_id_page) \ + (((unsigned int)(v_port_id_page)->port_id_domain << 16) | \ + ((unsigned int)(v_port_id_page)->port_id_area << 8) | \ + ((unsigned int)(v_port_id_page)->port_id_port)) + +#define UNF_GNN_GFF_ID_RJT_REASON(rjt_reason) \ + ((((rjt_reason) & UNF_CTIU_RJT_MASK) == \ + UNF_CTIU_RJT_UNABLE_PERFORM) && \ + ((((rjt_reason) & UNF_CTIU_RJT_EXP_MASK) == \ + UNF_CTIU_RJT_EXP_PORTID_NO_REG) || \ + (((rjt_reason) & UNF_CTIU_RJT_EXP_MASK) == \ + UNF_CTIU_RJT_EXP_PORTNAME_NO_REG) || \ + (((rjt_reason) & UNF_CTIU_RJT_EXP_MASK) == \ + UNF_CTIU_RJT_EXP_NODENAME_NO_REG))) + +#define UNF_NEED_BIG_RESPONSE_BUFF(cmnd_code) \ + (((cmnd_code) == ELS_ECHO) || ((cmnd_code) == NS_GID_PT) || \ + ((cmnd_code) == NS_GID_FT)) + +#define NEED_REFRESH_NPORTID(pkg) ((((pkg)->cmnd == ELS_PLOGI) || \ + ((pkg)->cmnd == ELS_PDISC) || \ + ((pkg)->cmnd == ELS_ADISC))) + +struct unf_els_handler_table { + unsigned int cmnd; + unsigned int (*pfn_els_cmnd_handler)(struct unf_lport_s *, + unsigned int, struct unf_xchg_s *); +}; + +#define UNF_SERVICE_COLLECT(service_collect, item) \ + do { \ + if ((item) < UNF_SERVICE_BUTT) { \ + service_collect.service_cnt[(item)]++; \ + } \ + } while (0) + +struct unf_els_handler_table els_handle[] = { + { ELS_PLOGI, unf_plogi_handler }, + { ELS_FLOGI, unf_flogi_handler }, + { ELS_LOGO, unf_logo_handler }, + { ELS_ECHO, unf_echo_handler }, + { ELS_RRQ, unf_rrq_handler }, + { ELS_REC, unf_rec_handler }, + { ELS_PRLI, unf_prli_handler }, + { ELS_PRLO, unf_prlo_handler }, + { ELS_PDISC, unf_pdisc_handler }, + { ELS_ADISC, unf_adisc_handler }, + { ELS_RSCN, unf_rscn_handler }, + { ELS_RLS, unf_rls_handler } +}; + +static void unf_check_rport_need_delay_prli(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int v_port_feature) +{ + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3300, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3301, UNF_TRUE, v_rport, return); + + v_port_feature &= UNF_PORT_MODE_BOTH; + + /* Used for: L_Port has INI mode & R_Port is not SW */ + if (v_rport->nport_id < UNF_FC_FID_DOM_MGR) { + /* + * 1. immediately: R_Port only with TGT, or + * L_Port only with INI & R_Port has TGT mode, + * send PRLI immediately + */ + if (((v_port_feature == UNF_PORT_MODE_TGT) || + (v_lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT)) || + ((v_port_feature & UNF_PORT_MODE_TGT) == + UNF_PORT_MODE_TGT)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Port(0x%x_0x%x) Rport(0x%x) with feature(0x%x) send PRLI", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, v_port_feature); + + /* Send PRLI to remote port */ + ret = unf_send_prli(v_lport, v_rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) Rport(0x%x) with feature(0x%x) send PRLI failed", + v_lport->port_id, + v_lport->nport_id, + v_rport->nport_id, + v_port_feature); + + /* Do R_Port recovery */ + unf_rport_error_recovery(v_rport); + } + } else if (v_port_feature != UNF_PORT_MODE_INI) { + /* 2. 
R_Port has BOTH mode or unknown, + * Delay to send PRLI + */ + /* Prevent: PRLI done before PLOGI */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]LOGIN: Port(0x%x_0x%x) Rport(0x%x) with feature(0x%x) delay to send PRLI", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, v_port_feature); + + /* Delay to send PRLI to R_Port */ + unf_rport_delay_login(v_rport); + } else { + /* 3. R_Port only with INI mode: wait for R_Port's + * PRLI: Do not care + */ + /* Cancel recovery(timer) work */ + if (delayed_work_pending(&v_rport->recovery_work)) { + if (cancel_delayed_work( + &v_rport->recovery_work)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]LOGIN: Port(0x%x_0x%x) Rport(0x%x) with feature(0x%x) is pure INI", + v_lport->port_id, + v_lport->nport_id, + v_rport->nport_id, + v_port_feature); + + unf_rport_ref_dec(v_rport); + } + } + + /* Server: R_Port only support INI, + * do not care this case + */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Port(0x%x_0x%x) Rport(0x%x) with feature(0x%x) wait for PRLI", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, v_port_feature); + } + } +} + +static unsigned int unf_low_level_bb_credit(struct unf_lport_s *v_lport) +{ + struct unf_lport_s *lport = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int bb_credit = UNF_LOWLEVEL_BBCREDIT; + + if (unlikely(!v_lport)) + return bb_credit; + + lport = v_lport; + if (unlikely(!lport->low_level_func.port_mgr_op.pfn_ll_port_config_get)) + return bb_credit; + + ret = lport->low_level_func.port_mgr_op.pfn_ll_port_config_get( + (void *)lport->fc_port, + UNF_PORT_CFG_GET_WORKBALE_BBCREDIT, + (void *)&bb_credit); + if (unlikely(ret != RETURN_OK)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[warn]Port(0x%x) get BB_Credit failed, use default value(%d)", + lport->port_id, UNF_LOWLEVEL_BBCREDIT); + + bb_credit = UNF_LOWLEVEL_BBCREDIT; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) with BB_Credit(%u)", + lport->port_id, bb_credit); + + return bb_credit; +} + +unsigned int unf_low_level_bbscn(struct unf_lport_s *v_lport) +{ + struct unf_lport_s *lport = v_lport; + struct unf_low_level_port_mgr_op_s *port_mgr = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int bb_scn = UNF_DEFAULT_BB_SC_N; + + if (unlikely(!v_lport)) + return bb_scn; + + port_mgr = &lport->low_level_func.port_mgr_op; + + if (unlikely(!port_mgr->pfn_ll_port_config_get)) + return bb_scn; + + ret = port_mgr->pfn_ll_port_config_get((void *)lport->fc_port, + UNF_PORT_CFG_GET_WORKBALE_BBSCN, + (void *)&bb_scn); + if (unlikely(ret != RETURN_OK)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[warn]Port(0x%x) get bbscn failed, use default value(%d)", + lport->port_id, UNF_DEFAULT_BB_SC_N); + + bb_scn = UNF_DEFAULT_BB_SC_N; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x)'s bbscn(%d)", + lport->port_id, bb_scn); + + return bb_scn; +} + +static unsigned int unf_els_cmnd_send(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_pkg, + struct unf_xchg_s *v_xchg) +{ + unsigned int ret = UNF_RETURN_ERROR; + unsigned long time_out = 0; + + UNF_CHECK_VALID(0x3302, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3303, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3304, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + if 
(unlikely(!v_lport->low_level_func.service_op.pfn_unf_els_send)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) ELS send function is NULL", + v_lport->port_id); + + return ret; + } + + /* Add ELS command/response (Exchange) timeout timer */ + time_out = UNF_GET_ELS_SFS_XCHG_TIMER(v_lport); + if (v_xchg->cmnd_code == ELS_RRQ) { + time_out = ((unsigned long) + UNF_GET_ELS_SFS_XCHG_TIMER(v_lport) > + UNF_RRQ_MIN_TIMEOUT_INTERVAL) ? + (unsigned long) + UNF_GET_ELS_SFS_XCHG_TIMER(v_lport) : + UNF_RRQ_MIN_TIMEOUT_INTERVAL; + } else if (v_xchg->cmnd_code == ELS_LOGO) { + time_out = UNF_LOGO_TIMEOUT_INTERVAL; + } + v_lport->xchg_mgr_temp.pfn_unf_xchg_add_timer((void *)v_xchg, + time_out, + UNF_TIMER_TYPE_SFS); + + v_pkg->private[PKG_PRIVATE_XCHG_TIMEER] = + (unsigned int)UNF_GET_ELS_SFS_XCHG_TIMER(v_lport); + v_pkg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] = + v_xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]; + + /* Send ELS command/response */ + ret = v_lport->low_level_func.service_op.pfn_unf_els_send( + v_lport->fc_port, v_pkg); + if (unlikely(ret != RETURN_OK)) { + /* Cancel timer if send failed */ + v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer( + (void *)v_xchg); + } + + return ret; +} + +static unsigned int unf_gs_cmnd_send(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_pkg, + struct unf_xchg_s *v_xchg) +{ + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3305, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3306, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3307, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + if (unlikely(!v_lport->low_level_func.service_op.pfn_unf_gs_send)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) GS send function is NULL", + v_lport->port_id); + + return ret; + } + + /* Add GS command timeout timer */ + v_lport->xchg_mgr_temp.pfn_unf_xchg_add_timer( + (void *)v_xchg, + (unsigned long)UNF_GET_GS_SFS_XCHG_TIMER(v_lport), + UNF_TIMER_TYPE_SFS); + v_pkg->private[PKG_PRIVATE_XCHG_TIMEER] = (unsigned int) + UNF_GET_GS_SFS_XCHG_TIMER(v_lport); + v_pkg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] = + v_xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]; + + /* Send GS command */ + ret = v_lport->low_level_func.service_op.pfn_unf_gs_send( + v_lport->fc_port, v_pkg); + if (unlikely(ret != RETURN_OK)) + /* Cancel timer if send failed */ + v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer( + (void *)v_xchg); + + return ret; +} + +static unsigned int unf_bls_cmnd_send(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_pkg, + struct unf_xchg_s *v_xchg) +{ + UNF_CHECK_VALID(0x3308, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3309, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3310, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + v_pkg->private[PKG_PRIVATE_XCHG_TIMEER] = + (unsigned int)UNF_GET_BLS_SFS_XCHG_TIMER(v_lport); + v_pkg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] = + v_xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]; + + return v_lport->low_level_func.service_op.pfn_unf_bls_send( + v_lport->fc_port, v_pkg); +} + +static void unf_fill_package(struct unf_frame_pkg_s *v_pkg, + struct unf_xchg_s *v_xchg, + struct unf_rport_s *v_rport) +{ + /* v_rport maybe NULL */ + UNF_CHECK_VALID(0x3311, UNF_TRUE, v_pkg, return); + UNF_CHECK_VALID(0x3312, UNF_TRUE, v_xchg, return); + + v_pkg->cmnd = v_xchg->cmnd_code; + v_pkg->fcp_cmnd = &v_xchg->fcp_cmnd; + v_pkg->frame_head.csctl_sid = v_xchg->sid; + v_pkg->frame_head.rctl_did = v_xchg->did; + 
v_pkg->frame_head.oxid_rxid = ((unsigned int)v_xchg->ox_id << 16 | + v_xchg->rx_id); + v_pkg->xchg_contex = v_xchg; + + UNF_CHECK_VALID(0x3313, UNF_TRUE, v_xchg->lport, return); + v_pkg->private[PKG_PRIVATE_XCHG_VP_INDEX] = + v_xchg->lport->vp_index; + + if (!v_rport) { + v_pkg->private[PKG_PRIVATE_XCHG_RPORT_INDEX] = + UNF_RPORT_INVALID_INDEX; + v_pkg->private[PKG_PRIVATE_RPORT_RX_SIZE] = INVALID_VALUE32; + } else { + v_pkg->private[PKG_PRIVATE_XCHG_RPORT_INDEX] = + v_rport->rport_index; + v_pkg->private[PKG_PRIVATE_RPORT_RX_SIZE] = + v_rport->max_frame_size; + } + + v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = v_xchg->hot_pool_tag; + v_pkg->private[PKG_PRIVATE_LOWLEVEL_XCHG_ADD] = + v_xchg->private[PKG_PRIVATE_LOWLEVEL_XCHG_ADD]; + v_pkg->unf_cmnd_pload_bl.buffer_ptr = + (unsigned char *) + v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + v_pkg->unf_cmnd_pload_bl.buf_dma_addr = + v_xchg->fcp_sfs_union.sfs_entry.sfs_buff_phy_addr; + + /* Low level need to know payload length if send ECHO response */ + v_pkg->unf_cmnd_pload_bl.length = + v_xchg->fcp_sfs_union.sfs_entry.cur_offset; +} + +static struct unf_xchg_s *unf_get_sfs_free_xchg_and_init( + struct unf_lport_s *v_lport, + unsigned int v_did, + struct unf_rport_s *v_rport, + union unf_sfs_u **v_fc_entry) +{ + struct unf_xchg_s *xchg = NULL; + union unf_sfs_u *fc_entry = NULL; + + xchg = unf_cm_get_free_xchg(v_lport, UNF_XCHG_TYPE_SFS); + if (!xchg) + return NULL; + + xchg->did = v_did; + xchg->sid = v_lport->nport_id; + xchg->oid = xchg->sid; + xchg->lport = v_lport; + xchg->rport = v_rport; + xchg->disc_rport = NULL; + + if (v_lport->low_level_func.xchg_mgr_type == + UNF_LOW_LEVEL_MGR_TYPE_PASSTIVE) + xchg->ox_id = xchg->hot_pool_tag; + + xchg->pfn_callback = NULL; + xchg->pfn_ob_callback = NULL; + + fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, xchg); + return NULL; + } + + *v_fc_entry = fc_entry; + + return xchg; +} + +static void unf_scr_callback(void *v_lport, + void *v_rport, + void *v_xchg) +{ + /* Callback function for SCR response: Send GID_PT with INI mode */ + struct unf_lport_s *lport = (struct unf_lport_s *)v_lport; + struct unf_disc_s *disc = &lport->disc; + struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_xchg; + struct unf_els_acc_s *els_acc = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned long lport_flag = 0; + unsigned long disc_flag = 0; + unsigned int cmnd = 0; + + UNF_CHECK_VALID(0x3694, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3695, UNF_TRUE, v_xchg, return); + UNF_REFERNCE_VAR(v_rport); + UNF_REFERNCE_VAR(ret); + + if (!xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) + return; + + els_acc = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->els_acc; + if (xchg->byte_orders & UNF_BIT_2) + cmnd = be32_to_cpu(els_acc->cmnd); + else + cmnd = (els_acc->cmnd); + + if ((cmnd & UNF_ELS_CMND_HIGH_MASK) == UNF_ELS_CMND_ACC) { + /* About ELS_CMND ACC */ + spin_lock_irqsave(&lport->lport_state_lock, lport_flag); + + /* Check L_Port state: SCR_WAIT */ + if (lport->en_states != UNF_LPORT_ST_SCR_WAIT) { + spin_unlock_irqrestore(&lport->lport_state_lock, + lport_flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x_0x%x) receive SCR ACC with error state(0x%x)", + lport->port_id, lport->nport_id, + lport->en_states); + return; + } + + /* Update 
L_Port state machine: Ready */ + /* LPort: SCR_WAIT --> READY */ + unf_lport_stat_ma(lport, UNF_EVENT_LPORT_REMOTE_ACC); + if (lport->en_states == UNF_LPORT_ST_READY) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]LOGIN: Port(0x%x_0x%x) enter READY state when received SCR response", + lport->port_id, lport->nport_id); + } + + /* Start to Discovery with INI mode: GID_PT */ + if ((lport->options & UNF_PORT_MODE_INI) == + UNF_PORT_MODE_INI) { + spin_unlock_irqrestore(&lport->lport_state_lock, + lport_flag); + + if (lport->disc.unf_disc_temp.pfn_unf_disc_start) { + spin_lock_irqsave(&disc->rport_busy_pool_lock, + disc_flag); + lport->disc.disc_option = UNF_INIT_DISC; + disc->last_disc_jiff = jiffies; + spin_unlock_irqrestore( + &disc->rport_busy_pool_lock, disc_flag); + + ret = lport->disc.unf_disc_temp.pfn_unf_disc_start(lport); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]LOGIN: Port(0x%x) DISC %s with INI mode", + lport->port_id, + (ret != RETURN_OK) ? "failed" : + "succeed"); + } + + UNF_REFERNCE_VAR(ret); + return; + } + /* TGT mode: Do not care */ + spin_unlock_irqrestore(&lport->lport_state_lock, lport_flag); + + /* NOTE: set state with UNF_DISC_ST_END used for RSCN process */ + spin_lock_irqsave(&disc->rport_busy_pool_lock, disc_flag); + lport->disc.en_states = UNF_DISC_ST_END; + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) is TGT mode, no need to do discovery", + lport->port_id); + return; + } + /* About ELS_CMND response: RJT */ + unf_lport_error_recovery(lport); + + UNF_REFERNCE_VAR(ret); +} + +static void unf_scr_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Callback function for exception: Do L_Port error recovery */ + struct unf_lport_s *lport = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3692, UNF_TRUE, v_xchg, return); + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + lport = v_xchg->lport; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + UNF_CHECK_VALID(0x3693, UNF_TRUE, lport, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) send SCR failed and do port recovery", + lport->port_id); + + unf_lport_error_recovery(lport); +} +
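+/* SCR (State Change Registration) registers with the fabric controller
+ * for RSCN notifications once RFF_ID has completed; on ACC the callback
+ * above moves the L_Port to READY and, for INI mode, starts GID_PT
+ * discovery.
+ */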
+unsigned int unf_send_scr(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* after RCVD RFF_ID ACC */ + struct unf_scr_s *scr = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned short ox_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3314, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3315, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + /* Get free exchange for SCR */ + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + NULL, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for SCR", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = ELS_SCR; /* SCR */ + ox_id = xchg->ox_id; + /* Set callback function */ + xchg->pfn_callback = unf_scr_callback; + xchg->pfn_ob_callback = unf_scr_ob_callback; + + /* Fill command/response package */ + unf_fill_package(&pkg, xchg, v_rport); + + scr = &fc_entry->scr; + memset(scr, 0, sizeof(struct unf_scr_s)); + scr->payload[0] = (UNF_GS_CMND_SCR); /* SCR is 0x62 */ + scr->payload[1] = (UNF_FABRIC_FULL_REG); /* Full registration */ + + /* Send SCR command */ + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: SCR send %s. Port(0x%x_0x%x)--->rport(0x%x) with OX_ID(0x%x)", + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_rec_pld(struct unf_rec_pld_s *v_rec_pld, + unsigned int v_sid, + unsigned short v_oxid) +{ + UNF_CHECK_VALID(0x3339, UNF_TRUE, v_rec_pld, return); + + v_rec_pld->rec_cmnd = UNF_ELS_CMND_REC; + v_rec_pld->xchg_org_sid = v_sid; + v_rec_pld->ox_id = v_oxid; + v_rec_pld->rx_id = INVALID_VALUE16; +} + +unsigned int unf_send_rec(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_io_xchg) +{ + struct unf_rec_pld_s *rec_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + struct unf_frame_pkg_s pkg; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3324, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3325, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3325, UNF_TRUE, v_io_xchg, return UNF_RETURN_ERROR); + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + + /* Get & Set new free exchange */ + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for REC", + v_lport->port_id); + return ret; + } + + xchg->cmnd_code = ELS_REC; + ox_id = xchg->ox_id; + unf_fill_package(&pkg, xchg, v_rport); + + rec_pld = &fc_entry->rec.rec_pld; + memset(rec_pld, 0, sizeof(struct unf_rec_pld_s)); + + unf_fill_rec_pld(rec_pld, v_lport->nport_id, v_io_xchg->ox_id); + + /* Start to Send REC command */ + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[info]LOGIN: Send REC %s. Port(0x%x_0x%x_0x%llx)--->RPort(0x%x_0x%llx) with OX_ID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", + v_lport->port_id, v_lport->nport_id, v_lport->port_name, + v_rport->nport_id, v_rport->port_name, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_flogi_pld(struct unf_flogi_payload_s *v_flogi_pld, + struct unf_lport_s *v_lport) +{ + struct unf_fabric_parms_s *fabric_parms = NULL; + + UNF_CHECK_VALID(0x3316, UNF_TRUE, v_flogi_pld, return); + UNF_CHECK_VALID(0x3317, UNF_TRUE, v_lport, return); + + fabric_parms = &v_flogi_pld->fabric_parms; + if ((v_lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC) || + (v_lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT) || + (v_lport->en_act_topo == UNF_TOP_P2P_MASK)) { + /* Fabric or P2P topology */ + fabric_parms->co_parms.bb_credit = + unf_low_level_bb_credit(v_lport); + fabric_parms->co_parms.lowest_version = + UNF_PLOGI_VERSION_LOWER; + fabric_parms->co_parms.highest_version = + UNF_PLOGI_VERSION_UPPER; + fabric_parms->co_parms.bb_receive_data_field_size = + (v_lport->max_frame_size); + fabric_parms->co_parms.bb_scn = unf_low_level_bbscn(v_lport); + } else { + /* Loop topology here */ + fabric_parms->co_parms.clean_address = + UNF_CLEAN_ADDRESS_DEFAULT; + fabric_parms->co_parms.bb_credit = UNF_BBCREDIT_LPORT; + fabric_parms->co_parms.lowest_version = + UNF_PLOGI_VERSION_LOWER; + fabric_parms->co_parms.highest_version = + UNF_PLOGI_VERSION_UPPER; + fabric_parms->co_parms.alternate_bb_credit_mgmt = + UNF_BBCREDIT_MANAGE_LPORT; /* :1 */ + fabric_parms->co_parms.bb_receive_data_field_size = + (v_lport->max_frame_size); + } + + if (v_lport->low_level_func.support_max_npiv_num != 0) + fabric_parms->co_parms.clean_address = 1; /* support NPIV */ + + fabric_parms->cl_parms[2].valid = UNF_CLASS_VALID; + fabric_parms->cl_parms[2].priority = UNF_PRIORITY_DISABLE; + + fabric_parms->cl_parms[2].sequential_delivery = + UNF_SEQUEN_DELIVERY_REQ; + fabric_parms->cl_parms[2].received_data_field_size = + (v_lport->max_frame_size); + + fabric_parms->high_node_name = + UNF_GET_NAME_HIGH_WORD(v_lport->node_name); + fabric_parms->low_node_name = + UNF_GET_NAME_LOW_WORD(v_lport->node_name); + fabric_parms->high_port_name = + UNF_GET_NAME_HIGH_WORD(v_lport->port_name); + fabric_parms->low_port_name = + UNF_GET_NAME_LOW_WORD(v_lport->port_name); +} + +static void unf_flogi_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Send FLOGI failed & Do L_Port recovery */ + struct unf_lport_s *lport = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3644, UNF_TRUE, v_xchg, return); + + /* Get L_port from exchange context */ + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + lport = v_xchg->lport; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + UNF_CHECK_VALID(0x3645, UNF_TRUE, lport, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) send FLOGI failed", + lport->port_id); + + /* Check L_Port state */ + spin_lock_irqsave(&lport->lport_state_lock, flag); + if (lport->en_states != UNF_LPORT_ST_FLOGI_WAIT) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) send FLOGI failed with state(0x%x)", + lport->port_id, lport->nport_id, lport->en_states); + + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + return; + } + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + /* Do L_Port error recovery */ + unf_lport_error_recovery(lport); +} + +unsigned int unf_send_flogi(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + struct unf_xchg_s *xchg = NULL; + struct unf_flogi_payload_s *flogi_pld = NULL; + union 
unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned short ox_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3318, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3319, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + /* Get & Set New free Exchange Context */ + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for FLOGI", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = ELS_FLOGI; /* FLOGI */ + + ox_id = xchg->ox_id; + + /* Set callback function */ + /* for rcvd flogi acc/rjt processing */ + xchg->pfn_callback = unf_flogi_callback; + /* for send flogi failed processing */ + xchg->pfn_ob_callback = unf_flogi_ob_callback; + + /* Fill package: Exchange --to-->> Package */ + unf_fill_package(&pkg, xchg, v_rport); + + /* Fill Flogi Payload */ + flogi_pld = &fc_entry->flogi.flogi_payload; + memset(flogi_pld, 0, sizeof(struct unf_flogi_payload_s)); + unf_fill_flogi_pld(flogi_pld, v_lport); + flogi_pld->cmnd = (UNF_ELS_CMND_FLOGI); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Begin to send FLOGI. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x)", + v_lport->port_id, v_rport->nport_id, ox_id); + + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, flogi_pld, + sizeof(struct unf_flogi_payload_s)); + + /* Start to send FLOGI command */ + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[warn]LOGIN: Send FLOGI failed. Port(0x%x)--->rport(0x%x)", + v_lport->port_id, v_rport->nport_id); + + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + } + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fdisc_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Do recovery */ + struct unf_lport_s *lport = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3638, UNF_TRUE, v_xchg, return); + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + lport = v_xchg->lport; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: FDISC send failed"); + + UNF_CHECK_VALID(0x3639, UNF_TRUE, NULL != lport, return); + + /* Do L_Port error recovery */ + unf_lport_error_recovery(lport); +} + +unsigned int unf_send_fdisc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + struct unf_xchg_s *exch = NULL; + struct unf_flogi_payload_s *fdisc_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned short ox_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3320, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3321, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + exch = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!exch) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for FDISC", + v_lport->port_id); + + return ret; + } + + exch->cmnd_code = ELS_FDISC; /* FDISC */ + + ox_id = exch->ox_id; + + /* Set callback function */ + exch->pfn_callback = unf_fdisc_callback; + exch->pfn_ob_callback = unf_fdisc_ob_callback; + + unf_fill_package(&pkg, exch, v_rport); + + /* Fill FDISC entry(payload) */ + fdisc_pld = &fc_entry->fdisc.fdisc_payload; + memset(fdisc_pld, 
0, sizeof(struct unf_flogi_payload_s)); + unf_fill_flogi_pld(fdisc_pld, v_lport); + fdisc_pld->cmnd = UNF_ELS_CMND_FDISC; /* update cmnd type */ + + /* Start to send FDISC */ + ret = unf_els_cmnd_send(v_lport, &pkg, exch); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)exch); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: FDISC send %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x)", + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_plogi_pld(struct unf_plogi_payload_s *v_plogi_pld, + struct unf_lport_s *v_lport) +{ + struct unf_lgn_parms_s *login_parms = NULL; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x3322, UNF_TRUE, v_plogi_pld, return); + UNF_CHECK_VALID(0x3323, UNF_TRUE, v_lport, return); + + lport = v_lport->root_lport; + v_plogi_pld->cmnd = (UNF_ELS_CMND_PLOGI); + login_parms = &v_plogi_pld->parms; + + if ((v_lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC) || + (v_lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT)) { + /* P2P or Fabric mode */ + login_parms->co_parms.bb_credit = + (unf_low_level_bb_credit(v_lport)); + login_parms->co_parms.alternate_bb_credit_mgmt = + UNF_BBCREDIT_MANAGE_NFPORT; /* 0 */ + login_parms->co_parms.bb_scn = + (v_lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC) ? + 0 : unf_low_level_bbscn(v_lport); + } else { + /* Public loop & Private loop mode */ + login_parms->co_parms.bb_credit = UNF_BBCREDIT_LPORT; /* 0 */ + login_parms->co_parms.alternate_bb_credit_mgmt = + UNF_BBCREDIT_MANAGE_LPORT; /* 1 */ + } + + login_parms->co_parms.lowest_version = UNF_PLOGI_VERSION_LOWER; + login_parms->co_parms.highest_version = UNF_PLOGI_VERSION_UPPER; + login_parms->co_parms.continuously_increasing = + UNF_CONTIN_INCREASE_SUPPORT; + login_parms->co_parms.bb_receive_data_field_size = + (v_lport->max_frame_size); + login_parms->co_parms.nport_total_concurrent_sequences = + (UNF_PLOGI_CONCURRENT_SEQ); + login_parms->co_parms.relative_offset = (UNF_PLOGI_RO_CATEGORY); + login_parms->co_parms.e_d_tov = UNF_DEFAULT_EDTOV; + if (lport->b_priority == UNF_PRIORITY_ENABLE) + login_parms->cl_parms[2].priority = UNF_PRIORITY_ENABLE; + else + login_parms->cl_parms[2].priority = UNF_PRIORITY_DISABLE; + + login_parms->cl_parms[2].valid = UNF_CLASS_VALID; /* for class_3 */ + login_parms->cl_parms[2].received_data_field_size = + (v_lport->max_frame_size); + login_parms->cl_parms[2].concurrent_sequences = + (UNF_PLOGI_CONCURRENT_SEQ); + login_parms->cl_parms[2].open_sequences_per_exchange = + (UNF_PLOGI_SEQ_PER_XCHG); + + login_parms->high_node_name = + UNF_GET_NAME_HIGH_WORD(v_lport->node_name); + login_parms->low_node_name = + UNF_GET_NAME_LOW_WORD(v_lport->node_name); + login_parms->high_port_name = + UNF_GET_NAME_HIGH_WORD(v_lport->port_name); + login_parms->low_port_name = + UNF_GET_NAME_LOW_WORD(v_lport->port_name); + + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, v_plogi_pld, + sizeof(struct unf_plogi_payload_s)); +} + +static void unf_plogi_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Do L_Port or R_Port recovery */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3656, UNF_TRUE, v_xchg, return); + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + lport = v_xchg->lport; + rport = v_xchg->rport; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + UNF_CHECK_VALID(0x3657, UNF_TRUE, lport, return); + UNF_CHECK_VALID(0x3734, 
UNF_TRUE, rport, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) send PLOGI(0x%x_0x%x) to RPort(%p:0x%x_0x%x) failed", + lport->port_id, lport->nport_id, v_xchg->ox_id, + v_xchg->rx_id, rport, rport->rport_index, rport->nport_id); + + /* Start to recovery */ + if (rport->nport_id > UNF_FC_FID_DOM_MGR) { + /* with Name server: R_Port is fabric --->>> + * L_Port error recovery + */ + unf_lport_error_recovery(lport); + } else { + /* R_Port is not fabric --->>> R_Port error recovery */ + unf_rport_error_recovery(rport); + } +} + +unsigned int unf_send_plogi(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + struct unf_plogi_payload_s *plogi_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + struct unf_frame_pkg_s pkg; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3324, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3325, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + + /* Get & Set new free exchange */ + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for PLOGI", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = ELS_PLOGI; /* PLOGI */ + + ox_id = xchg->ox_id; + + /* Set callback function */ + /* for rcvd plogi acc/rjt processing */ + xchg->pfn_callback = unf_plogi_callback; + /* for send plogi failed processing */ + xchg->pfn_ob_callback = unf_plogi_ob_callback; + + unf_fill_package(&pkg, xchg, v_rport); + + /* Fill PLOGI payload */ + plogi_pld = &fc_entry->plogi.payload; + memset(plogi_pld, 0, sizeof(struct unf_plogi_payload_s)); + unf_fill_plogi_pld(plogi_pld, v_lport); + + /* Start to Send PLOGI command */ + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Send PLOGI %s. Port(0x%x_0x%x_0x%llx)--->rport(0x%x_0x%llx) with OX_ID(0x%x)", + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_lport->nport_id, v_lport->port_name, + v_rport->nport_id, v_rport->port_name, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} +
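+/* The LOGO paths below share one policy for the response and timeout
+ * cases: close the old R_Port, then either retry the login/logout
+ * sequence while logo_retries stays under UNF_MAX_RETRY_COUNT, or
+ * force an immediate link down for the remote port.
+ */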
"failed" : "succeed", + v_lport->port_id, v_lport->nport_id, v_lport->port_name, + v_rport->nport_id, v_rport->port_name, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_logo_pld(struct unf_logo_payload_s *v_logo_pld, + struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x3326, UNF_TRUE, v_logo_pld, return); + UNF_CHECK_VALID(0x3327, UNF_TRUE, v_lport, return); + + v_logo_pld->cmnd = UNF_ELS_CMND_LOGO; + v_logo_pld->nport_id = (v_lport->nport_id); + v_logo_pld->high_port_name = + UNF_GET_NAME_HIGH_WORD(v_lport->port_name); + v_logo_pld->low_port_name = + UNF_GET_NAME_LOW_WORD(v_lport->port_name); + + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, + v_logo_pld, + sizeof(struct unf_logo_payload_s)); +} + +static void unf_logo_ob_callback(struct unf_xchg_s *v_xchg) +{ + struct unf_lport_s *lport; + struct unf_rport_s *rport; + struct unf_rport_s *old_rport; + struct unf_xchg_s *xchg; + unsigned int nport_id = 0; + unsigned int logo_retry = 0; + + UNF_CHECK_VALID(0x3675, UNF_TRUE, NULL, return); + xchg = v_xchg; + old_rport = xchg->rport; + logo_retry = old_rport->logo_retries; + + if (old_rport->nport_id != INVALID_VALUE32) + unf_rport_enter_closing(old_rport); + + lport = xchg->lport; + if (unf_is_lport_valid(lport) != RETURN_OK) + return; + + /* Get R_Port by exchange info: Init state */ + nport_id = xchg->did; + rport = unf_get_rport_by_nport_id(lport, nport_id); + rport = unf_get_safe_rport(lport, rport, UNF_RPORT_REUSE_INIT, + nport_id); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) cannot allocate RPort", + lport->port_id); + return; + } + + rport->logo_retries = logo_retry; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[info]LOGIN: Port(0x%x) received LOGO RSP timeout topo(0x%x) retries(%u)", + lport->port_id, lport->en_act_topo, rport->logo_retries); + + /* RCVD LOGO/PRLO & SEND LOGO: the same process */ + if (rport->logo_retries < UNF_MAX_RETRY_COUNT) { + /* <: retry (LOGIN or LOGO) if necessary */ + unf_process_rport_after_logo(lport, rport); + } else { + /* >=: Link down */ + unf_rport_immediate_linkdown(lport, rport); + } +} + +static void unf_logo_callback(void *v_lport, void *v_rport, void *v_xchg) +{ + /* RCVD LOGO ACC/RJT: retry(LOGIN/LOGO) or link down immediately */ + struct unf_lport_s *lport = (struct unf_lport_s *)v_lport; + struct unf_rport_s *rport = NULL; + struct unf_rport_s *old_rport = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_els_rjt_s *els_acc_rjt = NULL; + unsigned int cmnd = 0; + unsigned int nport_id = 0; + unsigned int logo_retry = 0; + + UNF_CHECK_VALID(0x3675, UNF_TRUE, v_xchg, return); + UNF_REFERNCE_VAR(v_rport); + + xchg = (struct unf_xchg_s *)v_xchg; + old_rport = xchg->rport; + + logo_retry = old_rport->logo_retries; + if (old_rport->nport_id != INVALID_VALUE32) + unf_rport_enter_closing(old_rport); + + if (unf_is_lport_valid(v_lport) != RETURN_OK) + return; + + if (!xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) + return; + + /* Get R_Port by exchange info: Init state */ + els_acc_rjt = + &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->els_rjt; + nport_id = xchg->did; + rport = unf_get_rport_by_nport_id(lport, nport_id); + rport = unf_get_safe_rport(lport, rport, + UNF_RPORT_REUSE_INIT, nport_id); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) cannot allocate RPort", + lport->port_id); + return; + } + + rport->logo_retries = logo_retry; + cmnd = be32_to_cpu(els_acc_rjt->cmnd); 
+ UNF_REFERNCE_VAR(cmnd); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Port(0x%x) received LOGO RSP(0x%x), topo(0x%x) Port options(0x%x) RPort options(0x%x) retries(%d)", + lport->port_id, (cmnd & UNF_ELS_CMND_HIGH_MASK), + lport->en_act_topo, + lport->options, rport->options, rport->logo_retries); + + /* RCVD LOGO/PRLO & SEND LOGO: the same process */ + if (rport->logo_retries < UNF_MAX_RETRY_COUNT) + /* <: retry (LOGIN or LOGO) if necessary */ + unf_process_rport_after_logo(lport, rport); + else + /* >=: Link down */ + unf_rport_immediate_linkdown(lport, rport); +} + +unsigned int unf_send_logo(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + struct unf_logo_payload_s *logo_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3328, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for LOGO", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = ELS_LOGO; /* LOGO */ + + ox_id = xchg->ox_id; + + /* Set callback function */ + /* retry or link down immediately */ + xchg->pfn_callback = unf_logo_callback; + xchg->pfn_ob_callback = unf_logo_ob_callback; /* same policy on timeout */ + + unf_fill_package(&pkg, xchg, v_rport); + + /* Fill LOGO entry(payload) */ + logo_pld = &fc_entry->logo.payload; + memset(logo_pld, 0, sizeof(struct unf_logo_payload_s)); + unf_fill_logo_pld(logo_pld, v_lport); + + /* Start to send LOGO command */ + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + v_rport->logo_retries++; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[info]LOGIN: LOGO send %s. Port(0x%x)--->rport(0x%x) OXID(0x%x) Retries(%d)", + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_rport->nport_id, + ox_id, v_rport->logo_retries); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +unsigned int unf_send_logo_by_did(struct unf_lport_s *v_lport, + unsigned int v_did) +{ + /* Has no R_Port */ + struct unf_logo_payload_s *logo_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3329, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_did, NULL, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for LOGO", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = ELS_LOGO; /* LOGO */ + + ox_id = xchg->ox_id; + + unf_fill_package(&pkg, xchg, NULL); + + /* Fill LOGO entry(payload) */ + logo_pld = &fc_entry->logo.payload; + memset(logo_pld, 0, sizeof(struct unf_logo_payload_s)); + unf_fill_logo_pld(logo_pld, v_lport); + + /* Start to send LOGO now */ + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: LOGO send %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x)", + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_did, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} +
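+/* Requests listed in UNF_NEED_BIG_RESPONSE_BUFF (ECHO, GID_PT, GID_FT)
+ * expect payloads larger than a standard SFS entry, so each exchange
+ * manager keeps a pool of big SFS buffers; the helper below moves one
+ * from the free list to the busy list and attaches it to the exchange.
+ */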
"failed" : "succeed", + v_lport->port_id, v_did, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void *unf_get_one_big_sfs_buf(struct unf_xchg_s *v_xchg) +{ + struct unf_big_sfs_s *big_sfs = NULL; + struct list_head *list_head = NULL; + struct unf_xchg_mgr_s *xchg_mgr = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3330, UNF_TRUE, v_xchg, return NULL); + xchg_mgr = v_xchg->xchg_mgr; + UNF_CHECK_VALID(0x3331, UNF_TRUE, xchg_mgr, return NULL); + + spin_lock_irqsave(&xchg_mgr->st_big_sfs_pool.big_sfs_pool_lock, flag); + if (!list_empty(&xchg_mgr->st_big_sfs_pool.list_free_pool)) { + /* from free to busy */ + list_head = (&xchg_mgr->st_big_sfs_pool.list_free_pool)->next; + list_del(list_head); + xchg_mgr->st_big_sfs_pool.free_count--; + list_add_tail(list_head, + &xchg_mgr->st_big_sfs_pool.list_busy_pool); + big_sfs = list_entry(list_head, struct unf_big_sfs_s, + entry_big_sfs); + } else { + spin_unlock_irqrestore( + &xchg_mgr->st_big_sfs_pool.big_sfs_pool_lock, + flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Allocate big sfs buf failed, count(0x%x) exchange(0x%p) command(0x%x)", + xchg_mgr->st_big_sfs_pool.free_count, + v_xchg, v_xchg->cmnd_code); + + return NULL; + } + spin_unlock_irqrestore(&xchg_mgr->st_big_sfs_pool.big_sfs_pool_lock, + flag); + + v_xchg->big_sfs_buf = big_sfs; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Allocate one address(0x%p) of big sfs buffer, remaining count(0x%x) exchange(0x%p) command(0x%x)", + big_sfs->vaddr, + xchg_mgr->st_big_sfs_pool.free_count, + v_xchg, + v_xchg->cmnd_code); + + return big_sfs->vaddr; +} + +static void unf_echo_callback(void *v_lport, void *v_rport, void *v_xchg) +{ + struct unf_lport_s *lport = (struct unf_lport_s *)v_lport; + struct unf_rport_s *rport = (struct unf_rport_s *)v_rport; + struct unf_xchg_s *xchg = NULL; + struct unf_echo_payload_s *echo_rsp_pld = NULL; + unsigned int cmnd = 0; + unsigned int mag_ver_local = 0; + unsigned int mag_ver_remote = 0; + + UNF_CHECK_VALID(0x3332, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3333, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3334, UNF_TRUE, v_xchg, return); + UNF_REFERNCE_VAR(lport); + UNF_REFERNCE_VAR(rport); + + xchg = (struct unf_xchg_s *)v_xchg; + if (!xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) + return; + + echo_rsp_pld = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo_acc.echo_pld; + UNF_CHECK_VALID(0x3335, UNF_TRUE, NULL != echo_rsp_pld, return); + + if (xchg->byte_orders & UNF_BIT_2) { + unf_big_end_to_cpu((unsigned char *)echo_rsp_pld, + sizeof(struct unf_echo_payload_s)); + cmnd = echo_rsp_pld->cmnd; + } else { + cmnd = echo_rsp_pld->cmnd; + } + + mag_ver_local = echo_rsp_pld->data[0]; + mag_ver_remote = echo_rsp_pld->data[1]; + + /* Print info */ + if ((cmnd & UNF_ELS_CMND_HIGH_MASK) == UNF_ELS_CMND_ACC) { + if ((mag_ver_local == ECHO_MG_VERSION_LOCAL) && + (mag_ver_remote == ECHO_MG_VERSION_REMOTE)) { + /* both side are 1822 */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "LPort(0x%x) send ECHO to RPort(0x%x), received ACC. 
+static void unf_echo_callback(void *v_lport, void *v_rport, void *v_xchg) +{ + struct unf_lport_s *lport = (struct unf_lport_s *)v_lport; + struct unf_rport_s *rport = (struct unf_rport_s *)v_rport; + struct unf_xchg_s *xchg = NULL; + struct unf_echo_payload_s *echo_rsp_pld = NULL; + unsigned int cmnd = 0; + unsigned int mag_ver_local = 0; + unsigned int mag_ver_remote = 0; + + UNF_CHECK_VALID(0x3332, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3333, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3334, UNF_TRUE, v_xchg, return); + UNF_REFERNCE_VAR(lport); + UNF_REFERNCE_VAR(rport); + + xchg = (struct unf_xchg_s *)v_xchg; + if (!xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) + return; + + echo_rsp_pld = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo_acc.echo_pld; + UNF_CHECK_VALID(0x3335, UNF_TRUE, NULL != echo_rsp_pld, return); + + if (xchg->byte_orders & UNF_BIT_2) { + unf_big_end_to_cpu((unsigned char *)echo_rsp_pld, + sizeof(struct unf_echo_payload_s)); + cmnd = echo_rsp_pld->cmnd; + } else { + cmnd = echo_rsp_pld->cmnd; + } + + mag_ver_local = echo_rsp_pld->data[0]; + mag_ver_remote = echo_rsp_pld->data[1]; + + /* Print info */ + if ((cmnd & UNF_ELS_CMND_HIGH_MASK) == UNF_ELS_CMND_ACC) { + if ((mag_ver_local == ECHO_MG_VERSION_LOCAL) && + (mag_ver_remote == ECHO_MG_VERSION_REMOTE)) { + /* both sides are 1822 */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, + UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "LPort(0x%x) send ECHO to RPort(0x%x), received ACC. local snd echo:(0x%x), remote rcv echo:(0x%x), remote snd echo acc:(0x%x), local rcv echo acc:(0x%x)", + lport->port_id, rport->nport_id, + xchg->private[PKG_PRIVATE_ECHO_CMD_SND_TIME], + xchg->private[PKG_PRIVATE_ECHO_CMD_RCV_TIME], + xchg->private[PKG_PRIVATE_ECHO_RSP_SND_TIME], + xchg->private[PKG_PRIVATE_ECHO_ACC_RCV_TIME]); + } else if ((mag_ver_local == ECHO_MG_VERSION_LOCAL) && + (mag_ver_remote != ECHO_MG_VERSION_REMOTE)) { + /* the peer doesn't support smartping; only local snd + * and rcv rsp time stamps + */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "LPort(0x%x) send ECHO to RPort(0x%x), received ACC. local snd echo:(0x%x), local rcv echo acc:(0x%x)", + lport->port_id, rport->nport_id, + xchg->private[PKG_PRIVATE_ECHO_CMD_SND_TIME], + xchg->private[PKG_PRIVATE_ECHO_ACC_RCV_TIME]); + } else if ((mag_ver_local != ECHO_MG_VERSION_LOCAL) && + (mag_ver_remote != ECHO_MG_VERSION_REMOTE)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "LPort(0x%x) send ECHO to RPort(0x%x), received ACC. local and remote are not IN300", + lport->port_id, rport->nport_id); + } + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) send ECHO to RPort(0x%x) and received RJT", + lport->port_id, rport->nport_id); + } + + xchg->echo_info.echo_result = UNF_ELS_ECHO_RESULT_OK; + xchg->echo_info.response_time = jiffies - + xchg->echo_info.response_time; + + /* wake up semaphore */ + up(&xchg->echo_info.echo_sync_sema); +} + +static void unf_echo_ob_callback(struct unf_xchg_s *v_xchg) +{ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + + UNF_CHECK_VALID(0x3336, UNF_TRUE, v_xchg, return); + lport = v_xchg->lport; + UNF_CHECK_VALID(0x3337, UNF_TRUE, lport, return); + rport = v_xchg->rport; + UNF_CHECK_VALID(0x3338, UNF_TRUE, rport, return); + + UNF_REFERNCE_VAR(lport); + UNF_REFERNCE_VAR(rport); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) send ECHO to RPort(0x%x) but timeout", + lport->port_id, rport->nport_id); + + v_xchg->echo_info.echo_result = UNF_ELS_ECHO_RESULT_FAIL; + + /* wake up semaphore */ + up(&v_xchg->echo_info.echo_sync_sema); +} + +unsigned int unf_send_echo(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int *v_time) +{ + struct unf_echo_payload_s *echo_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned int ret = UNF_RETURN_ERROR; + unsigned long delay = 0; + unsigned short ox_id = 0; + dma_addr_t phy_echo_addr; + + UNF_CHECK_VALID(0x3340, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3341, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3342, UNF_TRUE, v_time, return UNF_RETURN_ERROR); + + delay = 2 * (unsigned long)(v_lport->ra_tov); + + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for ECHO", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = ELS_ECHO; /* ECHO */ + + xchg->fcp_sfs_union.sfs_entry.cur_offset = UNF_ECHO_REQ_SIZE; + + ox_id = xchg->ox_id; + + /* Set callback function */ + xchg->pfn_callback = unf_echo_callback; /* wake up semaphore */ + xchg->pfn_ob_callback = unf_echo_ob_callback; /* wake up semaphore */ + + unf_fill_package(&pkg, xchg, v_rport); + + /* Fill ECHO entry(payload) */ + echo_pld = (struct unf_echo_payload_s 
*)unf_get_one_big_sfs_buf(xchg); + if (!echo_pld) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) can't allocate buffer for ECHO", + v_lport->port_id); + + unf_cm_free_xchg(v_lport, xchg); + return UNF_RETURN_ERROR; + } + + fc_entry->echo.echo_pld = echo_pld; + phy_echo_addr = pci_map_single(v_lport->low_level_func.dev, echo_pld, + UNF_ECHO_PAYLOAD_LEN, DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error( + v_lport->low_level_func.dev, phy_echo_addr)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) pci map err", + v_lport->port_id); + unf_cm_free_xchg(v_lport, xchg); + return UNF_RETURN_ERROR; + } + fc_entry->echo.phy_echo_addr = phy_echo_addr; + memset(echo_pld, 0, sizeof(struct unf_echo_payload_s)); + echo_pld->cmnd = (UNF_ELS_CMND_ECHO); + echo_pld->data[0] = ECHO_MG_VERSION_LOCAL; + + ret = unf_xchg_ref_inc(xchg, SEND_ELS); + UNF_CHECK_VALID(0x3343, UNF_TRUE, (ret == RETURN_OK), + return UNF_RETURN_ERROR); + + /* Start to send ECHO command */ + xchg->echo_info.response_time = jiffies; + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) { + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + } else { + if (down_timeout(&xchg->echo_info.echo_sync_sema, + (long) + msecs_to_jiffies((unsigned int)delay))) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]ECHO send %s. Port(0x%x)--->rport(0x%x) but response timeout with OX_ID(0x%x)", + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id); + + xchg->echo_info.echo_result = + UNF_ELS_ECHO_RESULT_FAIL; + } + + if (xchg->echo_info.echo_result == + UNF_ELS_ECHO_RESULT_FAIL) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "Echo send fail or timeout"); + + ret = UNF_RETURN_ERROR; + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "echo acc rsp,echo_cmd_snd(0x%xus)-->echo_cmd_rcv(0x%xus)-->echo_acc_snd(0x%xus)-->echo_acc_rcv(0x%xus).", + xchg->private[PKG_PRIVATE_ECHO_CMD_SND_TIME], + xchg->private[PKG_PRIVATE_ECHO_CMD_RCV_TIME], + xchg->private[PKG_PRIVATE_ECHO_RSP_SND_TIME], + xchg->private[PKG_PRIVATE_ECHO_ACC_RCV_TIME]); + + *v_time = ( + xchg->private[PKG_PRIVATE_ECHO_ACC_RCV_TIME] - + xchg->private[PKG_PRIVATE_ECHO_CMD_SND_TIME]) - + (xchg->private[PKG_PRIVATE_ECHO_RSP_SND_TIME] - + xchg->private[PKG_PRIVATE_ECHO_CMD_RCV_TIME]); + } + } + + pci_unmap_single(v_lport->low_level_func.dev, phy_echo_addr, + UNF_ECHO_PAYLOAD_LEN, DMA_BIDIRECTIONAL); + fc_entry->echo.phy_echo_addr = 0; + unf_xchg_ref_dec(xchg, SEND_ELS); + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_prli_pld(struct unf_pril_payload_s *v_prli_pld, + struct unf_lport_s *v_lport) +{ + unsigned int pld_len = 0; + + UNF_CHECK_VALID(0x3344, UNF_TRUE, v_prli_pld, return); + UNF_CHECK_VALID(0x3345, UNF_TRUE, v_lport, return); + + pld_len = sizeof(struct unf_pril_payload_s) - UNF_PRLI_SIRT_EXTRA_SIZE; + v_prli_pld->cmnd = (UNF_ELS_CMND_PRLI | + ((unsigned int)UNF_FC4_FRAME_PAGE_SIZE << + UNF_FC4_FRAME_PAGE_SIZE_SHIFT) | + ((unsigned int)pld_len)); + + v_prli_pld->parms[0] = (UNF_FC4_FRAME_PARM_0_FCP | + UNF_FC4_FRAME_PARM_0_I_PAIR); + v_prli_pld->parms[1] = UNF_NOT_MEANINGFUL; + v_prli_pld->parms[2] = UNF_NOT_MEANINGFUL; + + /* About Read Xfer_rdy disable */ + v_prli_pld->parms[3] = (UNF_FC4_FRAME_PARM_3_R_XFER_DIS | + v_lport->options); + + /* About FCP confirm */ + if (v_lport->low_level_func.lport_cfg_items.fcp_conf == UNF_TRUE) + v_prli_pld->parms[3] |= 
UNF_FC4_FRAME_PARM_3_CONF_ALLOW; + + /* About Tape support */ + if (v_lport->low_level_func.lport_cfg_items.tape_support) { + v_prli_pld->parms[3] |= + (UNF_FC4_FRAME_PARM_3_REC_SUPPORT | + UNF_FC4_FRAME_PARM_3_RETRY_SUPPORT | + UNF_FC4_FRAME_PARM_3_TASK_RETRY_ID_SUPPORT | + UNF_FC4_FRAME_PARM_3_CONF_ALLOW); + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x)'s PRLI payload: options(0x%x) parameter-3(0x%x)", + v_lport->port_id, v_lport->options, v_prli_pld->parms[3]); + + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, v_prli_pld, + sizeof(struct unf_pril_payload_s)); +} + +static void unf_prli_callback(void *v_lport, void *v_rport, void *v_xchg) +{ + /* RCVD PRLI RSP: ACC or RJT --->>> SCSI Link Up */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_pril_payload_s *prli_acc_pld = NULL; + unsigned long flag = 0; + unsigned int cmnd = 0; + unsigned int options = 0; + unsigned int fcp_conf = 0; + unsigned int rec_support = 0; + unsigned int task_retry_support = 0; + unsigned int retry_support = 0; + unsigned int tape_support = 0; + enum unf_rport_login_state_e rport_state = UNF_RPORT_ST_INIT; + + UNF_CHECK_VALID(0x3679, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3680, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3681, UNF_TRUE, v_xchg, return); + lport = (struct unf_lport_s *)v_lport; + rport = (struct unf_rport_s *)v_rport; + xchg = (struct unf_xchg_s *)v_xchg; + + if (!xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange(%p) entry is NULL", + lport->port_id, xchg); + return; + } + + /* Get PRLI ACC payload */ + prli_acc_pld = + &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->prli_acc.payload; + if (xchg->byte_orders & UNF_BIT_2) { + /* Change to little End, About INI/TGT mode & confirm info */ + options = be32_to_cpu(prli_acc_pld->parms[3]) & + (UNF_FC4_FRAME_PARM_3_TGT | + UNF_FC4_FRAME_PARM_3_INI); + + cmnd = be32_to_cpu(prli_acc_pld->cmnd); + fcp_conf = be32_to_cpu(prli_acc_pld->parms[3]) & + UNF_FC4_FRAME_PARM_3_CONF_ALLOW; + rec_support = be32_to_cpu(prli_acc_pld->parms[3]) & + UNF_FC4_FRAME_PARM_3_REC_SUPPORT; + task_retry_support = be32_to_cpu(prli_acc_pld->parms[3]) & + UNF_FC4_FRAME_PARM_3_TASK_RETRY_ID_SUPPORT; + retry_support = be32_to_cpu(prli_acc_pld->parms[3]) & + UNF_FC4_FRAME_PARM_3_RETRY_SUPPORT; + + } else { + options = (prli_acc_pld->parms[3]) & + (UNF_FC4_FRAME_PARM_3_TGT | + UNF_FC4_FRAME_PARM_3_INI); + + cmnd = (prli_acc_pld->cmnd); + fcp_conf = prli_acc_pld->parms[3] & + UNF_FC4_FRAME_PARM_3_CONF_ALLOW; + rec_support = prli_acc_pld->parms[3] & + UNF_FC4_FRAME_PARM_3_REC_SUPPORT; + task_retry_support = prli_acc_pld->parms[3] & + UNF_FC4_FRAME_PARM_3_TASK_RETRY_ID_SUPPORT; + retry_support = prli_acc_pld->parms[3] & + UNF_FC4_FRAME_PARM_3_RETRY_SUPPORT; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: PRLI RSP: RPort(0x%x) parameter-3(0x%x) option(0x%x) cmd(0x%x) rec support:%u", + rport->nport_id, prli_acc_pld->parms[3], options, + cmnd, rec_support); + + /* PRLI ACC: R_Port READY & Report R_Port Link Up */ + if ((cmnd & UNF_ELS_CMND_HIGH_MASK) == UNF_ELS_CMND_ACC) { + /* Update R_Port options(INI/TGT/BOTH) */ + rport->options = options; + + unf_update_port_feature(rport->port_name, rport->options); + + /* NOTE: R_Port only with INI mode, send LOGO */ + if (rport->options == UNF_PORT_MODE_INI) { + /* Update R_Port state: LOGO 
*/ + spin_lock_irqsave(&rport->rport_state_lock, flag); + unf_rport_state_ma(rport, UNF_EVENT_RPORT_LOGO); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* NOTE: Start to Send LOGO */ + unf_rport_enter_logo(lport, rport); + return; + } + + /* About confirm */ + if (fcp_conf && + (lport->low_level_func.lport_cfg_items.fcp_conf != + UNF_FALSE)) { + rport->fcp_conf_needed = UNF_TRUE; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port(0x%x_0x%x) FCP config is needed for RPort(0x%x)", + lport->port_id, lport->nport_id, + rport->nport_id); + } + tape_support = (rec_support && task_retry_support && retry_support); + if (tape_support && + (lport->low_level_func.lport_cfg_items.tape_support != UNF_FALSE)) { + rport->tape_support_needed = UNF_TRUE; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[info]Port(0x%x_0x%x) Rec is enabled for RPort(0x%x)", + lport->port_id, lport->nport_id, rport->nport_id); + } + /* Update R_Port state: READY */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + unf_rport_state_ma(rport, UNF_EVENT_RPORT_READY); + rport_state = rport->rp_state; + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Report R_Port online (Link Up) event to SCSI */ + if (rport_state == UNF_RPORT_ST_READY) { + rport->logo_retries = 0; + unf_update_lport_state_by_linkup_event( + lport, rport, rport->options); + } + } else { + /* PRLI RJT: Do R_Port error recovery */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]LOGIN: Port(0x%x)<---LS_RJT(DID:0x%x SID:0x%x) for PRLI. RPort(0x%p) OX_ID(0x%x)", + lport->port_id, lport->nport_id, + rport->nport_id, rport, xchg->ox_id); + + unf_rport_error_recovery(rport); + } +} + +static void unf_prli_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Do R_Port recovery */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3676, UNF_TRUE, v_xchg, return); + + UNF_REFERNCE_VAR(lport); + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + lport = v_xchg->lport; + rport = v_xchg->rport; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + UNF_CHECK_VALID(0x3677, UNF_TRUE, lport, return); + UNF_CHECK_VALID(0x3678, UNF_TRUE, rport, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) RPort(0x%x) send PRLI failed and do recovery", + lport->port_id, lport->nport_id, rport->nport_id); + + /* Start to do R_Port error recovery */ + unf_rport_error_recovery(rport); + + UNF_REFERNCE_VAR(lport); +} + +unsigned int unf_send_prli(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + struct unf_pril_payload_s *prli_pal = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned short ox_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3346, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3347, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + /* Get & Set new free exchange */ + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for PRLI", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = ELS_PRLI; // PRLI + + ox_id = xchg->ox_id; + + /* Set callback function */ + /* for rcvd prli acc/rjt processing */ + xchg->pfn_callback = 
unf_prli_callback; + /* for send prli failed processing */ + xchg->pfn_ob_callback = unf_prli_ob_callback; + + unf_fill_package(&pkg, xchg, v_rport); + + /* Fill PRLI payload */ + prli_pal = &fc_entry->prli.payload; + memset(prli_pal, 0, sizeof(struct unf_pril_payload_s)); + unf_fill_prli_pld(prli_pal, v_lport); + + /* Start to send PRLI ELS CMND */ + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: PRLI send %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x)", + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_prlo_pld(struct unf_pril_payload_s *v_prlo_pld, + struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x3348, UNF_TRUE, v_prlo_pld, return); + UNF_CHECK_VALID(0x3349, UNF_TRUE, v_lport, return); + + v_prlo_pld->cmnd = (UNF_ELS_CMND_PRLO); + v_prlo_pld->parms[0] = (UNF_FC4_FRAME_PARM_0_FCP); + v_prlo_pld->parms[1] = UNF_NOT_MEANINGFUL; + v_prlo_pld->parms[2] = UNF_NOT_MEANINGFUL; + v_prlo_pld->parms[3] = UNF_NO_SERVICE_PARAMS; + + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, v_prlo_pld, + sizeof(struct unf_pril_payload_s)); +} + +unsigned int unf_send_prlo(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + struct unf_pril_payload_s *prlo_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + struct unf_frame_pkg_s pkg; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3350, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3351, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + + /* Get free exchange for PRLO */ + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for PRLO", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = ELS_PRLO; /* PRLO */ + + ox_id = xchg->ox_id; + + unf_fill_package(&pkg, xchg, v_rport); + + /* Fill PRLO entry(payload) */ + prlo_pld = &fc_entry->prlo.payload; + memset(prlo_pld, 0, sizeof(struct unf_pril_payload_s)); + unf_fill_prlo_pld(prlo_pld, v_lport); + + /* Start to send PRLO command */ + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: PRLO send %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_rrq_pld(struct unf_rrq_s *v_rrq_pld, + struct unf_xchg_s *v_xchg) +{ + UNF_CHECK_VALID(0x3360, UNF_TRUE, v_rrq_pld, return); + UNF_CHECK_VALID(0x3361, UNF_TRUE, v_xchg, return); + + v_rrq_pld->cmnd = UNF_ELS_CMND_RRQ; + v_rrq_pld->sid = v_xchg->sid; + v_rrq_pld->oxid_rxid = ((unsigned int)v_xchg->ox_id << 16 | + v_xchg->rx_id); +} + +static void unf_rrq_callback(void *v_lport, void *v_rport, void *v_xchg) +{ + /* Release I/O */ + struct unf_lport_s *lport = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_els_acc_s *els_acc = NULL; + unsigned int cmnd = 0; + struct unf_xchg_s *io_xchg = NULL; + + UNF_CHECK_VALID(0x3696, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3697, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3698, UNF_TRUE, v_xchg, return); + UNF_REFERNCE_VAR(v_rport); + + lport = (struct unf_lport_s *)v_lport; + UNF_REFERNCE_VAR(lport); + xchg = (struct unf_xchg_s *)v_xchg; + + if (!xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) exchange(0x%p) SfsEntryPtr is NULL", + lport->port_id, xchg); + return; + } + + io_xchg = (struct unf_xchg_s *)xchg->io_xchg; + if (!io_xchg) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) IO exchange is NULL. RRQ cb sfs xchg(0x%p) tag(0x%x)", + lport->port_id, xchg, xchg->hot_pool_tag); + return; + } + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]Port(0x%x) release IO exch(0x%p) tag(0x%x). RRQ cb sfs xchg(0x%p) tag(0x%x)", + lport->port_id, xchg->io_xchg, io_xchg->hot_pool_tag, + xchg, xchg->hot_pool_tag); + + /* NOTE: release I/O exchange resource */ + unf_xchg_ref_dec(io_xchg, XCHG_ALLOC); + + els_acc = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->els_acc; + if (xchg->byte_orders & UNF_BIT_2) + cmnd = be32_to_cpu(els_acc->cmnd); + else + cmnd = (els_acc->cmnd); +} + +static void unf_rrq_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Release I/O */ + struct unf_xchg_s *xchg = NULL; + struct unf_xchg_s *io_xchg = NULL; + + xchg = (struct unf_xchg_s *)v_xchg; + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Exchange can't be NULL"); + return; + } + + io_xchg = (struct unf_xchg_s *)xchg->io_xchg; + if (!io_xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]IO exchange can't be NULL with Sfs exch(0x%p) tag(0x%x)", + xchg, xchg->hot_pool_tag); + return; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_KEVENT, + "[info]send RRQ failed: SFS exch(0x%p) tag(0x%x) exch(0x%p) tag(0x%x) OXID_RXID(0x%x_0x%x) SID_DID(0x%x_0x%x)", + xchg, xchg->hot_pool_tag, io_xchg, io_xchg->hot_pool_tag, + io_xchg->ox_id, io_xchg->rx_id, io_xchg->sid, + io_xchg->did); + + /* NOTE: Free I/O exchange resource */ + unf_xchg_ref_dec(io_xchg, XCHG_ALLOC); +} + +unsigned int unf_send_rrq(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + /* after ABTS Done */ + struct unf_rrq_s *rrq_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3362, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3363, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3364, UNF_TRUE, v_xchg, 
return UNF_RETURN_ERROR); + + /* Get & Set New free Exchange for RRQ */ + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for RRQ", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = ELS_RRQ; // RRQ + + /* Set callback function */ + xchg->pfn_callback = unf_rrq_callback; // release I/O exchange context + /* release I/O exchange context */ + xchg->pfn_ob_callback = unf_rrq_ob_callback; + xchg->io_xchg = v_xchg; // pointer to IO XCHG + + ox_id = xchg->ox_id; + + unf_fill_package(&pkg, xchg, v_rport); + + /* Fill RRQ entry(payload) */ + rrq_pld = &fc_entry->rrq; + memset(rrq_pld, 0, sizeof(struct unf_rrq_s)); + unf_fill_rrq_pld(rrq_pld, v_xchg); + + /* Start to send RRQ command to remote port */ + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]RRQ send %s. Port(0x%x)--->rport(0x%x) free old exchange(0x%x) with OX_ID(0x%x)", + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_rport->nport_id, + v_xchg->hot_pool_tag, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_gff_id_pld(struct unf_gffid_s *v_gff_id, + unsigned int v_nport_id) +{ + UNF_CHECK_VALID(0x3365, UNF_TRUE, v_gff_id, return); + + v_gff_id->ctiu_pream.rev_inid = (UNF_REV_NPORTID_INIT); + v_gff_id->ctiu_pream.gstype_gssub_options = (UNF_FSTYPE_OPT_INIT); + v_gff_id->ctiu_pream.cmnd_rsp_size = (UNF_FSTYPE_GFF_ID); + v_gff_id->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; + v_gff_id->nport_id = v_nport_id; +} + +static void unf_gff_id_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Send PLOGI */ + struct unf_lport_s *lport = NULL; + struct unf_lport_s *root_lport = NULL; + struct unf_rport_s *rport = NULL; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int nport_id = 0; + + UNF_CHECK_VALID(0x3611, UNF_TRUE, v_xchg, return); + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + lport = v_xchg->lport; + nport_id = v_xchg->disc_port_id; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + UNF_CHECK_VALID(0x3612, UNF_TRUE, NULL != lport, return); + + root_lport = (struct unf_lport_s *)lport->root_lport; + atomic_inc(&root_lport->disc.disc_thread_info.disc_contrl_size); + wake_up_process(root_lport->disc.disc_thread_info.data_thread); + + /* Get (safe) R_Port */ + rport = unf_get_rport_by_nport_id(lport, nport_id); + rport = unf_get_safe_rport(lport, rport, UNF_RPORT_REUSE_ONLY, + nport_id); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) can't allocate new RPort(0x%x)", + lport->port_id, nport_id); + return; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) send GFF_ID(0x%x_0x%x) to RPort(0x%x_0x%x) abnormal", + lport->port_id, lport->nport_id, v_xchg->ox_id, + v_xchg->rx_id, rport->rport_index, rport->nport_id); + + /* Update R_Port state: PLOGI_WAIT */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = nport_id; + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* NOTE: Start to send PLOGI */ + ret = unf_send_plogi(lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) 
send PLOGI failed, enter recovery", + lport->port_id); + + /* Do R_Port recovery */ + unf_rport_error_recovery(rport); + } +} + +static void unf_check_rport_need_delay_plogi(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int v_port_feature) +{ + /* + * Called by: + * 1. Private loop + * 2. RCVD GFF_ID ACC + */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = v_rport; + unsigned long flag = 0; + unsigned int nport_id = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3613, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3614, UNF_TRUE, v_rport, return); + nport_id = rport->nport_id; + + /* + * Send GFF_ID means L_Port has INI attribute + ** + * When to send PLOGI: + * 1. R_Port has TGT mode (COM or TGT), send PLOGI immediately + * 2. R_Port only with INI, send LOGO immediately + * 3. R_Port with unknown attribute, delay to send PLOGI + */ + if ((v_port_feature & UNF_PORT_MODE_TGT) || + (lport->enhanced_features & + UNF_LPORT_ENHANCED_FEATURE_ENHANCED_GFF)) { + /* R_Port has TGT mode: send PLOGI immediately */ + rport = unf_get_safe_rport(v_lport, rport, + UNF_RPORT_REUSE_ONLY, nport_id); + UNF_CHECK_VALID(0x3615, UNF_TRUE, rport, return); + + /* Update R_Port state: PLOGI_WAIT */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = nport_id; + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Start to send PLOGI */ + ret = unf_send_plogi(lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) send PLOGI to RPort(0x%x) failed", + lport->port_id, lport->nport_id, nport_id); + + unf_rport_error_recovery(rport); + } + } else if (v_port_feature == UNF_PORT_MODE_INI) { + /* R_Port only with INI mode: can't send PLOGI --->>> + * LOGO/nothing + */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + if (rport->rp_state == UNF_RPORT_ST_INIT) { + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) send LOGO to RPort(0x%x) which only has INI mode", + lport->port_id, lport->nport_id, nport_id); + + /* Enter Closing state */ + unf_rport_enter_logo(lport, rport); + } else { + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + } + } else { + /* Unknown R_Port attribute: Delay to send PLOGI */ + rport = unf_get_safe_rport(v_lport, rport, + UNF_RPORT_REUSE_ONLY, + nport_id); + UNF_CHECK_VALID(0x3616, UNF_TRUE, rport, return); + + /* Update R_Port state: PLOGI_WAIT */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = nport_id; + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + unf_rport_delay_login(rport); + } +} + +static void unf_rcv_gff_id_acc(struct unf_lport_s *v_lport, + struct unf_gffid_rsp_s *v_gff_id_rsp_pld, + unsigned int v_nport_id) +{ + /* Delay to LOGIN */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = NULL; + struct unf_gffid_rsp_s *gff_id_rsp_pld = v_gff_id_rsp_pld; + unsigned int fc4feature = 0; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3617, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3618, UNF_TRUE, v_gff_id_rsp_pld, return); + + fc4feature = gff_id_rsp_pld->fc_4_feature[1]; + if ((UNF_GFF_ACC_MASK & fc4feature) == 0) + fc4feature = be32_to_cpu(gff_id_rsp_pld->fc_4_feature[1]); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, 
UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Port(0x%x_0x%x) RPort(0x%x) received GFF_ID ACC. FC4 feature is 0x%x(1:TGT,2:INI,3:COM)", + lport->port_id, lport->nport_id, v_nport_id, fc4feature); + + /* Check (& Get new) R_Port */ + rport = unf_get_rport_by_nport_id(lport, v_nport_id); + if (rport) + rport = unf_find_rport(lport, v_nport_id, rport->port_name); + + if ((rport) || + (UNF_GET_PORT_OPTIONS(fc4feature) != UNF_PORT_MODE_INI)) { + rport = unf_get_safe_rport(lport, rport, + UNF_RPORT_REUSE_ONLY, + v_nport_id); + UNF_CHECK_VALID(0x3619, UNF_TRUE, NULL != rport, return); + } else { + return; + } + + if ((fc4feature & UNF_GFF_ACC_MASK) != 0) { + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->options = UNF_GET_PORT_OPTIONS(fc4feature); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + } else if (rport->port_name != INVALID_WWPN) { + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->options = unf_get_port_feature(rport->port_name); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + } + + /* NOTE: Send PLOGI if necessary */ + unf_check_rport_need_delay_plogi(lport, rport, rport->options); +} + +static void unf_rcv_gff_id_rjt(struct unf_lport_s *v_lport, + struct unf_gffid_rsp_s *v_gff_id_rsp_pld, + unsigned int v_nport_id) +{ + /* Delay LOGIN or LOGO */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = NULL; + struct unf_gffid_rsp_s *gff_id_rsp_pld = v_gff_id_rsp_pld; + unsigned int rjt_reason = 0; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3620, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3621, UNF_TRUE, v_gff_id_rsp_pld, return); + + /* Check (& Get new) R_Port */ + rport = unf_get_rport_by_nport_id(lport, v_nport_id); + if (rport) + rport = unf_find_rport(lport, v_nport_id, rport->port_name); + + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) get RPort by N_Port_ID(0x%x) failed and alloc new", + lport->port_id, v_nport_id); + + rport = unf_rport_get_free_and_init(lport, UNF_PORT_TYPE_FC, + v_nport_id); + UNF_CHECK_VALID(0x3622, UNF_TRUE, NULL != rport, return); + + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = v_nport_id; + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + } + + rjt_reason = gff_id_rsp_pld->ctiu_pream.frag_reason_exp_vend; + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) send GFF_ID for RPort(0x%x) but was rejected. 
Reason code(0x%x)", + lport->port_id, v_nport_id, rjt_reason); + + if (!UNF_GNN_GFF_ID_RJT_REASON(rjt_reason)) { + rport = unf_get_safe_rport(v_lport, rport, + UNF_RPORT_REUSE_ONLY, + v_nport_id); + UNF_CHECK_VALID(0x3623, UNF_TRUE, NULL != rport, return); + + /* Update R_Port state: PLOGI_WAIT */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = v_nport_id; + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Delay to send PLOGI */ + unf_rport_delay_login(rport); + } else { + spin_lock_irqsave(&rport->rport_state_lock, flag); + if (rport->rp_state == UNF_RPORT_ST_INIT) { + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Enter closing state */ + unf_rport_enter_logo(lport, rport); + } else { + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + } + } +} + +static void unf_gff_id_callback(void *v_lport, void *v_sns_port, void *v_xchg) +{ + struct unf_lport_s *lport = (struct unf_lport_s *)v_lport; + struct unf_lport_s *root_lport = NULL; + struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_xchg; + struct unf_gffid_rsp_s *gff_id_rsp_pld = NULL; + unsigned int cmnd_rsp_size = 0; + unsigned int nport_id = 0; + + UNF_CHECK_VALID(0x3626, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3627, UNF_TRUE, v_sns_port, return); + UNF_CHECK_VALID(0x3628, UNF_TRUE, v_xchg, return); + + UNF_REFERNCE_VAR(v_sns_port); + nport_id = xchg->disc_port_id; + + gff_id_rsp_pld = + &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->gff_id_rsp; + cmnd_rsp_size = (gff_id_rsp_pld->ctiu_pream.cmnd_rsp_size); + + root_lport = (struct unf_lport_s *)lport->root_lport; + atomic_inc(&root_lport->disc.disc_thread_info.disc_contrl_size); + wake_up_process(root_lport->disc.disc_thread_info.data_thread); + + if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_ACCEPT) { + /* Case for GFF_ID ACC: (Delay)PLOGI */ + unf_rcv_gff_id_acc(lport, gff_id_rsp_pld, nport_id); + } else if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_REJECT) { + /* Case for GFF_ID RJT: Delay PLOGI or LOGO directly */ + unf_rcv_gff_id_rjt(lport, gff_id_rsp_pld, nport_id); + } else { + /* Send PLOGI */ + unf_rcv_gff_id_rsp_unknown(lport, nport_id); + } +} + +unsigned int unf_send_gff_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_sns_port, + unsigned int v_nport_id) +{ + struct unf_gffid_s *gff_id = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + struct unf_frame_pkg_s pkg; + struct unf_lport_s *root_lport = NULL; + + UNF_CHECK_VALID(0x3367, UNF_TRUE, v_sns_port, return UNF_RETURN_ERROR); + + if (unf_is_lport_valid(v_lport) != RETURN_OK) + /* Lport is invalid, no retry or handle required, return ok */ + return RETURN_OK; + + root_lport = (struct unf_lport_s *)v_lport->root_lport; + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_sns_port->nport_id, + v_sns_port, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for GFF_ID", + v_lport->port_id); + + return unf_get_and_post_disc_event(v_lport, v_sns_port, + v_nport_id, + UNF_DISC_GET_FEATURE); + } + + xchg->cmnd_code = NS_GFF_ID; /* GFF_ID */ + + xchg->disc_port_id = v_nport_id; + + /* Set callback function */ + xchg->pfn_ob_callback = unf_gff_id_ob_callback; /* send PLOGI */ + xchg->pfn_callback = unf_gff_id_callback; /* send PLOGI or LOGO */ + + ox_id = 
xchg->ox_id; + + unf_fill_package(&pkg, xchg, v_sns_port); + + /* Fill GFF_ID payload(entry) */ + gff_id = &fc_entry->gff_id; /* GFF_ID */ + memset(gff_id, 0, sizeof(struct unf_gffid_s)); + unf_fill_gff_id_pld(gff_id, v_nport_id); + + /* Send GFF_ID GS command now */ + ret = unf_gs_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + else + atomic_dec( + &root_lport->disc.disc_thread_info.disc_contrl_size); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: GFF_ID send %s. Port(0x%x)--->rport(0x%x). Inquire RPort(0x%x) OX_ID(0x%x)", + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_sns_port->nport_id, + v_nport_id, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_gnn_id_pld(struct unf_gnnid_s *v_gnn_id_pld, + unsigned int v_nport_id) +{ + /* Inquiry R_Port node name from SW */ + UNF_CHECK_VALID(0x3368, UNF_TRUE, v_gnn_id_pld, return); + + v_gnn_id_pld->ctiu_pream.rev_inid = UNF_REV_NPORTID_INIT; + v_gnn_id_pld->ctiu_pream.gstype_gssub_options = UNF_FSTYPE_OPT_INIT; + v_gnn_id_pld->ctiu_pream.cmnd_rsp_size = UNF_FSTYPE_GNN_ID; + v_gnn_id_pld->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; + + v_gnn_id_pld->nport_id = v_nport_id; +} + +/* + * Function Name : unf_gnn_id_ob_callback + * Function Description: Callback for sending GNN_ID abnormal + * Input Parameters : struct unf_xchg_s *v_xchg + * Output Parameters : N/A + * Return Type : void + */ +static void unf_gnn_id_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Send GFF_ID */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *sns_port = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int nport_id = 0; + struct unf_lport_s *root_lport = NULL; + + UNF_CHECK_VALID(0x3597, UNF_TRUE, v_xchg, return); + lport = v_xchg->lport; + UNF_CHECK_VALID(0x3598, UNF_TRUE, lport, return); + sns_port = v_xchg->rport; + UNF_CHECK_VALID(0x3599, UNF_TRUE, sns_port, return); + nport_id = v_xchg->disc_port_id; + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) send GNN_ID failed to inquire RPort(0x%x)", + lport->port_id, nport_id); + + root_lport = (struct unf_lport_s *)lport->root_lport; + atomic_inc(&root_lport->disc.disc_thread_info.disc_contrl_size); + wake_up_process(root_lport->disc.disc_thread_info.data_thread); + + /* NOTE: continue next stage */ + ret = unf_get_and_post_disc_event(lport, sns_port, nport_id, + UNF_DISC_GET_FEATURE); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", + lport->port_id, UNF_DISC_GET_FEATURE, nport_id); + + unf_rcv_gff_id_rsp_unknown(lport, nport_id); // send PLOGI + } +} + +static void unf_rcv_gnn_id_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_sns_port, + struct unf_gnnid_rsp_s *v_gnn_id_rsp_pld, + unsigned int v_nport_id) +{ + /* Send GFF_ID or Link down immediately */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *sns_port = v_sns_port; + struct unf_gnnid_rsp_s *gnn_id_rsp_pld = v_gnn_id_rsp_pld; + struct unf_rport_s *rport = NULL; + unsigned long long node_name = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3600, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3601, UNF_TRUE, v_sns_port, return); + UNF_CHECK_VALID(0x3602, UNF_TRUE, v_gnn_id_rsp_pld, return); + + node_name = ((unsigned long long)(gnn_id_rsp_pld->node_name[0]) << + 32) | + ((unsigned long 
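+ /* the GNN_ID ACC carries the node name as two 32-bit words; reassemble the 64-bit WWNN before comparing it with the local node name */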
long)(gnn_id_rsp_pld->node_name[1])); + + if (node_name == lport->node_name) { + /* R_Port & L_Port with same Node Name */ + rport = unf_get_rport_by_nport_id(lport, v_nport_id); + if (rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_KEVENT, + "[info]Port(0x%x) has the same node name(0x%llx) with RPort(0x%x), linkdown it", + lport->port_id, node_name, v_nport_id); + + /* Destroy immediately */ + unf_rport_immediate_linkdown(lport, rport); + } + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Port(0x%x) got RPort(0x%x) with node name(0x%llx) by GNN_ID", + lport->port_id, v_nport_id, node_name); + + /* Start to Send GFF_ID */ + ret = unf_get_and_post_disc_event(lport, sns_port, v_nport_id, + UNF_DISC_GET_FEATURE); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, + UNF_ERR, + "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", + lport->port_id, UNF_DISC_GET_FEATURE, + v_nport_id); + + unf_rcv_gff_id_rsp_unknown(lport, v_nport_id); + } + } +} + +static void unf_rcv_gnn_id_rjt(struct unf_lport_s *v_lport, + struct unf_rport_s *v_sns_port, + struct unf_gnnid_rsp_s *v_gnn_id_rsp_pld, + unsigned int v_nport_id) +{ + /* Send GFF_ID */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *sns_port = v_sns_port; + struct unf_gnnid_rsp_s *gnn_id_rsp_pld = v_gnn_id_rsp_pld; + unsigned int rjt_reason = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3603, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3604, UNF_TRUE, v_sns_port, return); + UNF_CHECK_VALID(0x3605, UNF_TRUE, v_gnn_id_rsp_pld, return); + + rjt_reason = (gnn_id_rsp_pld->ctiu_pream.frag_reason_exp_vend); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) GNN_ID was rejected with reason code(0x%x)", + lport->port_id, lport->nport_id, rjt_reason); + + if (!UNF_GNN_GFF_ID_RJT_REASON(rjt_reason)) { + /* Node existence: Continue next stage */ + ret = unf_get_and_post_disc_event(lport, sns_port, v_nport_id, + UNF_DISC_GET_FEATURE); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, + UNF_ERR, + "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", + lport->port_id, UNF_DISC_GET_FEATURE, + v_nport_id); + + unf_rcv_gff_id_rsp_unknown(lport, v_nport_id); + } + } +} + +static void unf_gnn_id_callback(void *v_lport, void *v_sns_port, void *v_xchg) +{ + struct unf_lport_s *lport = (struct unf_lport_s *)v_lport; + struct unf_rport_s *sns_port = (struct unf_rport_s *)v_sns_port; + struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_xchg; + struct unf_gnnid_rsp_s *gnn_id_rsp_pld = NULL; + unsigned int cmnd_rsp_size = 0; + unsigned int nport_id = 0; + struct unf_lport_s *root_lport = NULL; + + UNF_CHECK_VALID(0x3608, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3609, UNF_TRUE, v_sns_port, return); + UNF_CHECK_VALID(0x3610, UNF_TRUE, v_xchg, return); + + nport_id = xchg->disc_port_id; + gnn_id_rsp_pld = + &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->gnn_id_rsp; + cmnd_rsp_size = (gnn_id_rsp_pld->ctiu_pream.cmnd_rsp_size); + + root_lport = (struct unf_lport_s *)lport->root_lport; + atomic_inc(&root_lport->disc.disc_thread_info.disc_contrl_size); + wake_up_process(root_lport->disc.disc_thread_info.data_thread); + + if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_ACCEPT) { + /* Case ACC: send GFF_ID or Link down immediately */ + unf_rcv_gnn_id_acc(lport, sns_port, gnn_id_rsp_pld, nport_id); + } else if ((cmnd_rsp_size & 
UNF_CT_IU_RSP_MASK) == UNF_CT_IU_REJECT) { + /* Case RJT: send GFF_ID */ + unf_rcv_gnn_id_rjt(lport, sns_port, gnn_id_rsp_pld, nport_id); + } else { /* NOTE: continue next stage */ + /* Case unknown: send GFF_ID */ + unf_rcv_gnn_id_rsp_unknown(lport, sns_port, nport_id); + } +} + +unsigned int unf_send_gnn_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_sns_port, + unsigned int v_nport_id) +{ + /* from DISC stop/re-login */ + struct unf_gnnid_s *gnn_id_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + struct unf_frame_pkg_s pkg; + struct unf_lport_s *root_lport = NULL; + + UNF_CHECK_VALID(0x3370, UNF_TRUE, v_sns_port, return UNF_RETURN_ERROR); + + if (unf_is_lport_valid(v_lport) != RETURN_OK) + /* Lport is invalid, no retry or handle required, return ok */ + return RETURN_OK; + + root_lport = (struct unf_lport_s *)v_lport->root_lport; + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_sns_port->nport_id, + v_sns_port, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) exchange can't be NULL for GNN_ID", + v_lport->port_id); + + return unf_get_and_post_disc_event(v_lport, v_sns_port, + v_nport_id, + UNF_DISC_GET_NODE_NAME); + } + + xchg->cmnd_code = NS_GNN_ID; /* GNN_ID */ + xchg->disc_port_id = v_nport_id; + + ox_id = xchg->ox_id; + + /* Set callback function */ + xchg->pfn_ob_callback = unf_gnn_id_ob_callback; /* send GFF_ID */ + xchg->pfn_callback = unf_gnn_id_callback; /* send GFF_ID */ + + unf_fill_package(&pkg, xchg, v_sns_port); + + /* Fill GNN_ID entry(payload) */ + gnn_id_pld = &fc_entry->gnn_id; /* GNNID payload */ + memset(gnn_id_pld, 0, sizeof(struct unf_gnnid_s)); + unf_fill_gnn_id_pld(gnn_id_pld, v_nport_id); + + /* Start to send GNN_ID GS command */ + ret = unf_gs_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + else + atomic_dec( + &root_lport->disc.disc_thread_info.disc_contrl_size); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: GNN_ID send %s. Port(0x%x_0x%x)--->rport(0x%x) inquire Nportid(0x%x) OX_ID(0x%x)", + (ret != RETURN_OK) ? 
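+ /* disc_contrl_size throttles outstanding name-server queries: it is decremented for each GS command actually sent and restored by the callback/ob_callback before the disc thread is woken */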
"failed" : "succeed", v_lport->port_id, + v_lport->nport_id, v_sns_port->nport_id, + v_nport_id, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_gpn_id_pld(struct unf_gpnid_s *v_gpn_id_pld, + unsigned int v_nport_id) +{ + UNF_CHECK_VALID(0x3371, UNF_TRUE, v_gpn_id_pld, return); + + v_gpn_id_pld->ctiu_pream.rev_inid = UNF_REV_NPORTID_INIT; + v_gpn_id_pld->ctiu_pream.gstype_gssub_options = UNF_FSTYPE_OPT_INIT; + v_gpn_id_pld->ctiu_pream.cmnd_rsp_size = UNF_FSTYPE_GPN_ID; + v_gpn_id_pld->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; + + /* Inquiry WWN from SW */ + v_gpn_id_pld->nport_id = v_nport_id; +} + +unsigned int unf_rport_relogin(struct unf_lport_s *v_lport, + unsigned int v_nport_id) +{ + /* Send GNN_ID */ + struct unf_rport_s *sns_port = NULL; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3563, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + /* Get SNS R_Port */ + sns_port = unf_get_rport_by_nport_id(v_lport, UNF_FC_FID_DIR_SERV); + if (!sns_port) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) can't find fabric Port", + v_lport->nport_id); + + return UNF_RETURN_ERROR; + } + + /* Send GNN_ID now to SW */ + ret = unf_get_and_post_disc_event(v_lport, sns_port, v_nport_id, + UNF_DISC_GET_NODE_NAME); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", + v_lport->nport_id, UNF_DISC_GET_NODE_NAME, + v_nport_id); + + /* NOTE: Continue to next stage */ + unf_rcv_gnn_id_rsp_unknown(v_lport, sns_port, v_nport_id); + } + + return ret; +} + +static void unf_rcv_gpn_id_acc(struct unf_lport_s *v_lport, + unsigned int v_nport_id, + unsigned long long v_port_name) +{ + /* then PLOGI or re-login */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = NULL; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + + rport = unf_find_valid_rport(lport, v_port_name, v_nport_id); + if (rport) { + /* R_Port with TGT mode & L_Port with INI mode: + * send PLOGI with INIT state + */ + if ((rport->options & UNF_PORT_MODE_TGT) == + UNF_PORT_MODE_TGT) { + rport = unf_get_safe_rport(v_lport, rport, + UNF_RPORT_REUSE_INIT, + v_nport_id); + UNF_CHECK_VALID(0x3630, UNF_TRUE, rport, return); + + /* Update R_Port state: PLOGI_WAIT */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = v_nport_id; + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Start to send PLOGI */ + ret = unf_send_plogi(lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) send PLOGI failed for 0x%x, enter recovry", + lport->port_id, lport->nport_id, + v_nport_id); + + unf_rport_error_recovery(rport); + } + } else { + spin_lock_irqsave(&rport->rport_state_lock, flag); + if ((rport->rp_state != UNF_RPORT_ST_PLOGI_WAIT) && + (rport->rp_state != UNF_RPORT_ST_PRLI_WAIT) && + (rport->rp_state != UNF_RPORT_ST_READY)) { + unf_rport_state_ma(rport, + UNF_EVENT_RPORT_LOGO); + spin_unlock_irqrestore( + &rport->rport_state_lock, flag); + + /* Do LOGO operation */ + unf_rport_enter_logo(lport, rport); + } else { + spin_unlock_irqrestore( + &rport->rport_state_lock, flag); + } + } + } else { + /* Send GNN_ID */ + (void)unf_rport_relogin(lport, v_nport_id); + } +} + +static void unf_rcv_gpn_id_rjt(struct unf_lport_s *v_lport, + unsigned int v_nport_id) +{ + struct 
unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = NULL; + + UNF_CHECK_VALID(0x3631, UNF_TRUE, v_lport, return); + + rport = unf_get_rport_by_nport_id(lport, v_nport_id); + if (rport) + unf_rport_linkdown(lport, rport); /* Do R_Port Link down */ +} + +/* + * Function Name : unf_rcv_gpn_id_rsp_unknown + * Function Description: Process unknown type of GPN_ID response + * Input Parameters : struct unf_lport_s *v_lport + * : unsigned int v_nport_id + * Output Parameters : N/A + * Return Type : void + */ +void unf_rcv_gpn_id_rsp_unknown(struct unf_lport_s *v_lport, + unsigned int v_nport_id) +{ + struct unf_lport_s *lport = v_lport; + + UNF_CHECK_VALID(0x3632, UNF_TRUE, v_lport, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) wrong response of GPN_ID with RPort(0x%x)", + lport->port_id, v_nport_id); + + /* NOTE: go to next stage */ + (void)unf_rport_relogin(lport, v_nport_id); +} + +static void unf_gpn_id_callback(void *v_lport, void *v_sns_port, void *v_xchg) +{ + struct unf_lport_s *lport = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_gpnid_rsp_s *gpn_id_rsp_pld = NULL; + unsigned long long port_name = 0; + unsigned int cmnd_rsp_size = 0; + unsigned int nport_id = 0; + struct unf_lport_s *root_lport = NULL; + + UNF_CHECK_VALID(0x3635, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3636, UNF_TRUE, v_sns_port, return); + UNF_CHECK_VALID(0x3637, UNF_TRUE, v_xchg, return); + + UNF_REFERNCE_VAR(v_sns_port); + + lport = (struct unf_lport_s *)v_lport; + xchg = (struct unf_xchg_s *)v_xchg; + nport_id = xchg->disc_port_id; + + root_lport = (struct unf_lport_s *)lport->root_lport; + atomic_inc(&root_lport->disc.disc_thread_info.disc_contrl_size); + wake_up_process(root_lport->disc.disc_thread_info.data_thread); + + gpn_id_rsp_pld = + &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->gpn_id_rsp; + cmnd_rsp_size = gpn_id_rsp_pld->ctiu_pream.cmnd_rsp_size; + if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_ACCEPT) { + /* GPN_ID ACC */ + port_name = ((unsigned long long) + (gpn_id_rsp_pld->port_name[0]) << 32) | + ((unsigned long long) + (gpn_id_rsp_pld->port_name[1])); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]LOGIN: Port(0x%x) GPN_ID ACC with WWN(0x%llx) RPort NPort ID(0x%x)", + lport->port_id, port_name, nport_id); + + /* Send PLOGI or LOGO or GNN_ID */ + unf_rcv_gpn_id_acc(lport, nport_id, port_name); + } else if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == + UNF_CT_IU_REJECT) { + /* GPN_ID RJT: Link Down */ + unf_rcv_gpn_id_rjt(lport, nport_id); + } else { + /* GPN_ID response type unknown: Send GNN_ID */ + unf_rcv_gpn_id_rsp_unknown(lport, nport_id); + } +} + +static void unf_gpn_id_ob_callback(struct unf_xchg_s *v_xchg) +{ + struct unf_lport_s *lport = NULL; + unsigned int nport_id = 0; + struct unf_lport_s *root_lport = NULL; + + UNF_CHECK_VALID(0x3633, UNF_TRUE, v_xchg, return); + + lport = v_xchg->lport; + nport_id = v_xchg->disc_port_id; + UNF_CHECK_VALID(0x3634, UNF_TRUE, lport, return); + + root_lport = (struct unf_lport_s *)lport->root_lport; + atomic_inc(&root_lport->disc.disc_thread_info.disc_contrl_size); + wake_up_process(root_lport->disc.disc_thread_info.data_thread); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) send GPN_ID failed to inquire RPort(0x%x)", + lport->port_id, nport_id); + + /* NOTE: go to next stage */ + (void)unf_rport_relogin(lport, nport_id); +} + +unsigned int unf_send_gpn_id(struct unf_lport_s *v_lport, + struct 
unf_rport_s *v_sns_port, + unsigned int v_nport_id) +{ + struct unf_gpnid_s *gpn_id_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + struct unf_frame_pkg_s pkg; + struct unf_lport_s *root_lport = NULL; + + UNF_CHECK_VALID(0x3374, UNF_TRUE, v_sns_port, return UNF_RETURN_ERROR); + + if (unf_is_lport_valid(v_lport) != RETURN_OK) { + /* Lport is invalid, no retry or handle required, return ok */ + return RETURN_OK; + } + root_lport = (struct unf_lport_s *)v_lport->root_lport; + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_sns_port->nport_id, + v_sns_port, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for GPN_ID", + v_lport->port_id); + + return unf_get_and_post_disc_event(v_lport, v_sns_port, + v_nport_id, + UNF_DISC_GET_PORT_NAME); + } + + xchg->cmnd_code = NS_GPN_ID; // GPN_ID + xchg->disc_port_id = v_nport_id; + + ox_id = xchg->ox_id; + + /* Set callback function */ + xchg->pfn_callback = unf_gpn_id_callback; + /* re-login --->>> GNN_ID */ + xchg->pfn_ob_callback = unf_gpn_id_ob_callback; + + unf_fill_package(&pkg, xchg, v_sns_port); + + /* Fill GPN_ID entry(payload) */ + gpn_id_pld = &fc_entry->gpn_id; /* GPN_ID payload */ + memset(gpn_id_pld, 0, sizeof(struct unf_gpnid_s)); + unf_fill_gpn_id_pld(gpn_id_pld, v_nport_id); + + /* Send GPN_ID GS command */ + ret = unf_gs_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + else + atomic_dec( + &root_lport->disc.disc_thread_info.disc_contrl_size); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: GPN_ID send %s. Port(0x%x)--->rport(0x%x). Inquire RPort(0x%x) with OX_ID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", v_lport->port_id, + v_sns_port->nport_id, v_nport_id, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_gid_ft_pld(struct unf_gid_s *v_gid_pld) +{ + UNF_CHECK_VALID(0x3376, UNF_TRUE, v_gid_pld, return); + + v_gid_pld->ctiu_pream.rev_inid = UNF_REV_NPORTID_INIT; + v_gid_pld->ctiu_pream.gstype_gssub_options = UNF_FSTYPE_OPT_INIT; + v_gid_pld->ctiu_pream.cmnd_rsp_size = UNF_FSTYPE_GID_FT; + v_gid_pld->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; + + v_gid_pld->scope_type = UNF_GID_FT_TYPE; +} + +static void unf_gid_ft_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Do recovery */ + struct unf_lport_s *lport = NULL; + union unf_sfs_u *sfs_ptr = NULL; + struct unf_disc_s *disc = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3589, UNF_TRUE, v_xchg, return); + + sfs_ptr = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!sfs_ptr) + return; + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + lport = v_xchg->lport; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + if (!lport) + return; + + disc = &lport->disc; + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + unf_disc_state_ma(lport, UNF_EVENT_DISC_FAILED); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + /* Do DISC recovery operation */ + unf_disc_error_recovery(lport); +} + +unsigned int unf_send_gid_ft(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + struct unf_gid_s *gid_pld = NULL; + struct unf_gid_rsp_s *gid_rsp = NULL; + struct unf_gif_acc_pld_s *gid_acc_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned short ox_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3377, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3378, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for GID_FT", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = NS_GID_FT; // GID_FT + + ox_id = xchg->ox_id; + + /* Set callback function */ + xchg->pfn_ob_callback = unf_gid_ft_ob_callback; // do DISC recovery + xchg->pfn_callback = unf_gid_ft_callback; + + unf_fill_package(&pkg, xchg, v_rport); + + /* Fill GID_FT entry(payload) */ + gid_pld = &fc_entry->get_id.gid_req; /* GID req payload */ + unf_fill_gid_ft_pld(gid_pld); + gid_rsp = &fc_entry->get_id.gid_rsp; /* GID rsp payload */ + + /* Get GID_FT Response payload */ + gid_acc_pld = (struct unf_gif_acc_pld_s *)unf_get_one_big_sfs_buf(xchg); + if (!gid_acc_pld) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) allocate GID_FT response buffer failed", + v_lport->port_id); + + unf_cm_free_xchg(v_lport, xchg); + return UNF_RETURN_ERROR; + } + memset(gid_acc_pld, 0, sizeof(struct unf_gif_acc_pld_s)); + gid_rsp->gid_acc_pld = gid_acc_pld; + + /* Send GID_FT GS commmand now */ + ret = unf_gs_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: GID_FT send %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_gid_pt_pld(struct unf_gid_s *v_gid_pld, + struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x3379, UNF_TRUE, v_gid_pld, return); + UNF_CHECK_VALID(0x3380, UNF_TRUE, v_lport, return); + + v_gid_pld->ctiu_pream.rev_inid = (UNF_REV_NPORTID_INIT); + v_gid_pld->ctiu_pream.gstype_gssub_options = (UNF_FSTYPE_OPT_INIT); + v_gid_pld->ctiu_pream.cmnd_rsp_size = (UNF_FSTYPE_GID_PT); + v_gid_pld->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; + + /* 0x7F000000 means NX_Port */ + v_gid_pld->scope_type = UNF_GID_PT_TYPE; + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, v_gid_pld, + sizeof(struct unf_gid_s)); +} + +static void unf_gid_pt_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Do recovery */ + struct unf_lport_s *lport = NULL; + union unf_sfs_u *sfs_ptr = NULL; + struct unf_disc_s *disc = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3593, UNF_TRUE, v_xchg, return); + + sfs_ptr = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!sfs_ptr) + return; + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + lport = v_xchg->lport; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + if (!lport) + return; + + disc = &lport->disc; + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + unf_disc_state_ma(lport, UNF_EVENT_DISC_FAILED); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + /* Do DISC recovery operation */ + unf_disc_error_recovery(lport); +} + +unsigned int unf_send_gid_pt(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* from DISC start */ + struct unf_gid_s *gid_pld = NULL; + struct unf_gid_rsp_s *gid_rsp = NULL; + struct unf_gif_acc_pld_s *gid_acc_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned short ox_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3381, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3382, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for GID_PT", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = NS_GID_PT; /* GID_PT */ + ox_id = xchg->ox_id; + + /* Set callback function */ + xchg->pfn_ob_callback = unf_gid_pt_ob_callback; /* do DISC recovery */ + xchg->pfn_callback = unf_gid_pt_callback; + + unf_fill_package(&pkg, xchg, v_rport); + + /* Fill GID_PT entry(payload) */ + gid_pld = &fc_entry->get_id.gid_req; /* GID req payload */ + unf_fill_gid_pt_pld(gid_pld, v_lport); + gid_rsp = &fc_entry->get_id.gid_rsp; /* GID rsp payload */ + + /* Get GID_PT response payload */ + gid_acc_pld = (struct unf_gif_acc_pld_s *)unf_get_one_big_sfs_buf(xchg); + if (!gid_acc_pld) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%0x) Allocate GID_PT response buffer failed", + v_lport->port_id); + + unf_cm_free_xchg(v_lport, xchg); + return UNF_RETURN_ERROR; + } + memset(gid_acc_pld, 0, sizeof(struct unf_gif_acc_pld_s)); + gid_rsp->gid_acc_pld = gid_acc_pld; + + /* Send GID_PT GS command to SW */ + ret = unf_gs_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + 
"[info]LOGIN: GID_PT send %s. Port(0x%x_0x%x)--->rport(0x%x) with OXID(0x%x)", + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_rft_id_pld(struct unf_rftid_s *v_rft_id_pld, + struct unf_lport_s *v_lport) +{ + unsigned int i = 1; + + UNF_CHECK_VALID(0x3383, UNF_TRUE, v_rft_id_pld, return); + UNF_CHECK_VALID(0x3384, UNF_TRUE, v_lport, return); + + v_rft_id_pld->ctiu_pream.rev_inid = UNF_REV_NPORTID_INIT; + v_rft_id_pld->ctiu_pream.gstype_gssub_options = UNF_FSTYPE_OPT_INIT; + v_rft_id_pld->ctiu_pream.cmnd_rsp_size = UNF_FSTYPE_RFT_ID; + v_rft_id_pld->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; + v_rft_id_pld->nport_id = (v_lport->nport_id); + v_rft_id_pld->fc_4_types[0] = (UNF_FC4_SCSI_BIT8); + + for (i = 1; i < 8; i++) + v_rft_id_pld->fc_4_types[i] = 0; +} + +static void unf_rft_id_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Do recovery */ + struct unf_lport_s *lport = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3687, UNF_TRUE, v_xchg, return); + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flag); + lport = v_xchg->lport; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag); + + UNF_CHECK_VALID(0x3688, UNF_TRUE, lport, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) send RFT_ID failed", + lport->port_id, lport->nport_id); + + /* Do L_Port recovery operation */ + unf_lport_error_recovery(lport); +} + +static void unf_rft_id_callback(void *v_lport, void *v_rport, void *v_xchg) +{ + /* RFT_ID --->>> RFF_ID */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_ctiu_prem_s *ctiu_prem = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int cmnd_rsp_size = 0; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3689, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3690, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3691, UNF_TRUE, v_xchg, return); + + lport = (struct unf_lport_s *)v_lport; + rport = (struct unf_rport_s *)v_rport; + xchg = (struct unf_xchg_s *)v_xchg; + + if (!xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) SFS entry is NULL with state(0x%x)", + lport->port_id, lport->en_states); + return; + } + + ctiu_prem = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->rft_id_rsp.ctiu_pream; + cmnd_rsp_size = ctiu_prem->cmnd_rsp_size; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Port(0x%x_0x%x) RFT_ID response is (0x%x)", + (cmnd_rsp_size & UNF_CT_IU_RSP_MASK), + lport->port_id, lport->nport_id); + + if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_ACCEPT) { + /* Case for RFT_ID ACC: send RFF_ID */ + spin_lock_irqsave(&lport->lport_state_lock, flag); + if (lport->en_states != UNF_LPORT_ST_RFT_ID_WAIT) { + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port(0x%x_0x%x) receive RFT_ID ACC in state(0x%x)", + lport->port_id, lport->nport_id, + lport->en_states); + + return; + } + + /* LPort: RFT_ID_WAIT --> RFF_ID_WAIT */ + unf_lport_stat_ma(lport, UNF_EVENT_LPORT_REMOTE_ACC); + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + /* Start to send RFF_ID GS command */ + ret = unf_send_rff_id(lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + 
UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) send RFF_ID failed", + lport->port_id, lport->nport_id); + + /* Do L_Port recovery */ + unf_lport_error_recovery(lport); + } + } else { + /* Case for RFT_ID RJT: do recovery */ + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) receive RFT_ID RJT with reason_code(0x%x) explanation(0x%x)", + lport->port_id, lport->nport_id, + (ctiu_prem->frag_reason_exp_vend) & + UNF_CT_IU_REASON_MASK, + (ctiu_prem->frag_reason_exp_vend) & + UNF_CT_IU_EXPLAN_MASK); + + /* Do L_Port recovery */ + unf_lport_error_recovery(lport); + } +} + +unsigned int unf_send_rft_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* After PLOGI process */ + struct unf_rftid_s *rft_id = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + struct unf_frame_pkg_s pkg; + + UNF_REFERNCE_VAR(ox_id); + UNF_CHECK_VALID(0x3385, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3386, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + + xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id, + v_rport, &fc_entry); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) exchange can't be NULL for RFT_ID", + v_lport->port_id); + + return ret; + } + + xchg->cmnd_code = NS_RFT_ID; /* RFT_ID */ + + ox_id = xchg->ox_id; + + /* Set callback function */ + xchg->pfn_callback = unf_rft_id_callback; + xchg->pfn_ob_callback = unf_rft_id_ob_callback; /* Do L_Port recovery */ + + unf_fill_package(&pkg, xchg, v_rport); + + /* Fill RFT_ID entry(payload) */ + rft_id = &fc_entry->rft_id; + memset(rft_id, 0, sizeof(struct unf_rftid_s)); + unf_fill_rft_id_pld(rft_id, v_lport); + + /* Send RFT_ID GS command */ + ret = unf_gs_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: RFT_ID send %s. Port(0x%x_0x%x)--->rport(0x%x). rport(0x%p) wwpn(0x%llx) OX_ID(0x%x)", + (ret != RETURN_OK) ? 
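+ /* fabric registration chain after PLOGI with the name server: RFT_ID (register FC-4 type) --> RFF_ID (register FC-4 features) --> SCR (state change registration) */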
"failed" : "succeed", + v_lport->port_id, v_lport->nport_id, v_rport->nport_id, + v_rport, v_rport->port_name, ox_id); + + UNF_REFERNCE_VAR(ox_id); + return ret; +} + +static void unf_fill_rff_id_pld(struct unf_rffid_s *v_rff_id_pld, + struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x3387, UNF_TRUE, v_rff_id_pld, return); + UNF_CHECK_VALID(0x3388, UNF_TRUE, v_lport, return); + + v_rff_id_pld->ctiu_pream.rev_inid = UNF_REV_NPORTID_INIT; + v_rff_id_pld->ctiu_pream.gstype_gssub_options = UNF_FSTYPE_OPT_INIT; + v_rff_id_pld->ctiu_pream.cmnd_rsp_size = UNF_FSTYPE_RFF_ID; + v_rff_id_pld->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; + v_rff_id_pld->nport_id = v_lport->nport_id; + v_rff_id_pld->fc_4_feature = UNF_FC4_FCP_TYPE | + (v_lport->options << 4); +} + +static void unf_rff_id_callback(void *v_lport, void *v_rport, void *v_xchg) +{ + /* RFF_ID --->>> SCR(for INI mode) */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_ctiu_prem_s *ctiu_prem = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int cmnd_rsp_size = 0; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3684, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3685, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3686, UNF_TRUE, v_xchg, return); + + lport = (struct unf_lport_s *)v_lport; + xchg = (struct unf_xchg_s *)v_xchg; + if (unlikely(!xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr)) + return; + + /* SCR send to 0xfffffd(not 0xfffffc), need to get new R_Port */ + UNF_REFERNCE_VAR(v_rport); + rport = unf_get_rport_by_nport_id(lport, UNF_FC_FID_FCTRL); // 0xfffffd + rport = unf_get_safe_rport(lport, rport, UNF_RPORT_REUSE_ONLY, + UNF_FC_FID_FCTRL); + if (unlikely(!rport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) can't allocate RPort(0x%x)", + lport->port_id, UNF_FC_FID_FCTRL); + return; + } + + rport->nport_id = UNF_FC_FID_FCTRL; + ctiu_prem = + &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->rff_id_rsp.ctiu_pream; + cmnd_rsp_size = ctiu_prem->cmnd_rsp_size; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]LOGIN: Port(0x%x_0x%x) RFF_ID rsp is (0x%x)", + lport->port_id, lport->nport_id, + (cmnd_rsp_size & UNF_CT_IU_RSP_MASK)); + + /* RSP Type check: some SW not support RFF_ID, go to next stage also */ + if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_ACCEPT) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]LOGIN: Port(0x%x_0x%x) receive RFF ACC(0x%x) in state(0x%x)", + lport->port_id, lport->nport_id, + (cmnd_rsp_size & UNF_CT_IU_RSP_MASK), + lport->en_states); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) receive RFF RJT(0x%x) in state(0x%x) with RJT reason code(0x%x) explanation(0x%x)", + lport->port_id, lport->nport_id, + (cmnd_rsp_size & UNF_CT_IU_RSP_MASK), + lport->en_states, + (ctiu_prem->frag_reason_exp_vend) & + UNF_CT_IU_REASON_MASK, + (ctiu_prem->frag_reason_exp_vend) & + UNF_CT_IU_EXPLAN_MASK); + } + + /* L_Port state check */ + spin_lock_irqsave(&lport->lport_state_lock, flag); + if (lport->en_states != UNF_LPORT_ST_RFF_ID_WAIT) { + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) receive RFF reply in state(0x%x)", + lport->port_id, lport->nport_id, lport->en_states); + + return; + } + + /* Update L_Port state & Send SCR to remote port */ + /* LPort: 
+static void unf_rff_id_callback(void *v_lport, void *v_rport, void *v_xchg)
+{
+	/* RFF_ID --->>> SCR (for INI mode) */
+	struct unf_lport_s *lport = NULL;
+	struct unf_rport_s *rport = NULL;
+	struct unf_xchg_s *xchg = NULL;
+	struct unf_ctiu_prem_s *ctiu_prem = NULL;
+	unsigned int ret = UNF_RETURN_ERROR;
+	unsigned int cmnd_rsp_size = 0;
+	unsigned long flag = 0;
+
+	UNF_CHECK_VALID(0x3684, UNF_TRUE, v_lport, return);
+	UNF_CHECK_VALID(0x3685, UNF_TRUE, v_rport, return);
+	UNF_CHECK_VALID(0x3686, UNF_TRUE, v_xchg, return);
+
+	lport = (struct unf_lport_s *)v_lport;
+	xchg = (struct unf_xchg_s *)v_xchg;
+	if (unlikely(!xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr))
+		return;
+
+	/* SCR is sent to 0xfffffd (not 0xfffffc), so a new R_Port is needed */
+	UNF_REFERNCE_VAR(v_rport);
+	rport = unf_get_rport_by_nport_id(lport, UNF_FC_FID_FCTRL); // 0xfffffd
+	rport = unf_get_safe_rport(lport, rport, UNF_RPORT_REUSE_ONLY,
+				   UNF_FC_FID_FCTRL);
+	if (unlikely(!rport)) {
+		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT,
+			  UNF_WARN,
+			  "[warn]Port(0x%x) can't allocate RPort(0x%x)",
+			  lport->port_id, UNF_FC_FID_FCTRL);
+		return;
+	}
+
+	rport->nport_id = UNF_FC_FID_FCTRL;
+	ctiu_prem =
+	    &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->rff_id_rsp.ctiu_pream;
+	cmnd_rsp_size = ctiu_prem->cmnd_rsp_size;
+
+	UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO,
+		  "[info]LOGIN: Port(0x%x_0x%x) RFF_ID rsp is (0x%x)",
+		  lport->port_id, lport->nport_id,
+		  (cmnd_rsp_size & UNF_CT_IU_RSP_MASK));
+
+	/* RSP type check: some switches do not support RFF_ID,
+	 * so proceed to the next stage either way
+	 */
+	if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_ACCEPT) {
+		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT,
+			  UNF_MAJOR,
+			  "[info]LOGIN: Port(0x%x_0x%x) receive RFF ACC(0x%x) in state(0x%x)",
+			  lport->port_id, lport->nport_id,
+			  (cmnd_rsp_size & UNF_CT_IU_RSP_MASK),
+			  lport->en_states);
+	} else {
+		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
+			  "[warn]LOGIN: Port(0x%x_0x%x) receive RFF RJT(0x%x) in state(0x%x) with RJT reason code(0x%x) explanation(0x%x)",
+			  lport->port_id, lport->nport_id,
+			  (cmnd_rsp_size & UNF_CT_IU_RSP_MASK),
+			  lport->en_states,
+			  (ctiu_prem->frag_reason_exp_vend) &
+			  UNF_CT_IU_REASON_MASK,
+			  (ctiu_prem->frag_reason_exp_vend) &
+			  UNF_CT_IU_EXPLAN_MASK);
+	}
+
+	/* L_Port state check */
+	spin_lock_irqsave(&lport->lport_state_lock, flag);
+	if (lport->en_states != UNF_LPORT_ST_RFF_ID_WAIT) {
+		spin_unlock_irqrestore(&lport->lport_state_lock, flag);
+		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
+			  "[warn]LOGIN: Port(0x%x_0x%x) receive RFF reply in state(0x%x)",
+			  lport->port_id, lport->nport_id, lport->en_states);
+
+		return;
+	}
+
+	/* Update L_Port state & send SCR to the remote port */
+	/* LPort: RFF_ID_WAIT --> SCR_WAIT */
+	unf_lport_stat_ma(lport, UNF_EVENT_LPORT_REMOTE_ACC);
+	spin_unlock_irqrestore(&lport->lport_state_lock, flag);
+
+	/* Start to send SCR command */
+	ret = unf_send_scr(lport, rport);
+	if (ret != RETURN_OK) {
+		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
+			  "[warn]LOGIN: Port(0x%x_0x%x) send SCR failed",
+			  lport->port_id, lport->nport_id);
+
+		/* Do L_Port recovery */
+		unf_lport_error_recovery(lport);
+	}
+}
+
+static void unf_rff_id_ob_callback(struct unf_xchg_s *v_xchg)
+{
+	/* Do recovery */
+	struct unf_lport_s *lport = NULL;
+	unsigned long flag = 0;
+
+	UNF_CHECK_VALID(0x3682, UNF_TRUE, v_xchg, return);
+
+	spin_lock_irqsave(&v_xchg->xchg_state_lock, flag);
+	lport = v_xchg->lport;
+	spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flag);
+
+	UNF_CHECK_VALID(0x3683, UNF_TRUE, NULL != lport, return);
+
+	UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
+		  "[warn]LOGIN: Port(0x%x_0x%x) send RFF_ID failed",
+		  lport->port_id, lport->nport_id);
+
+	/* Do L_Port recovery */
+	unf_lport_error_recovery(lport);
+}
+
+unsigned int unf_send_rff_id(struct unf_lport_s *v_lport,
+			     struct unf_rport_s *v_rport)
+{
+	/* Called after RFT_ID; the next step is SCR */
+	struct unf_rffid_s *rff_id = NULL;
+	union unf_sfs_u *fc_entry = NULL;
+	struct unf_xchg_s *xchg = NULL;
+	unsigned int ret = UNF_RETURN_ERROR;
+	struct unf_frame_pkg_s pkg = { 0 };
+	unsigned short ox_id = 0;
+
+	UNF_REFERNCE_VAR(ox_id);
+	UNF_CHECK_VALID(0x3389, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
+	UNF_CHECK_VALID(0x3390, UNF_TRUE, v_rport, return UNF_RETURN_ERROR);
+
+	UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO,
+		  "%s Enter", __func__);
+
+	xchg = unf_get_sfs_free_xchg_and_init(v_lport, v_rport->nport_id,
+					      v_rport, &fc_entry);
+	if (!xchg) {
+		UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
+			  "[err]Port(0x%x) exchange can't be NULL for RFF_ID",
+			  v_lport->port_id);
+
+		return ret;
+	}
+
+	xchg->cmnd_code = NS_RFF_ID; // RFF_ID
+
+	ox_id = xchg->ox_id;
+
+	/* Set callback function */
+	xchg->pfn_callback = unf_rff_id_callback;
+	xchg->pfn_ob_callback = unf_rff_id_ob_callback; /* Do L_Port recovery */
+
+	unf_fill_package(&pkg, xchg, v_rport);
+
+	/* Fill RFF_ID entry(payload) */
+	rff_id = &fc_entry->rff_id;
+	memset(rff_id, 0, sizeof(struct unf_rffid_s));
+	unf_fill_rff_id_pld(rff_id, v_lport);
+
+	/* Send RFF_ID GS command */
+	ret = unf_gs_cmnd_send(v_lport, &pkg, xchg);
+	if (ret != RETURN_OK)
+		unf_cm_free_xchg((void *)v_lport, (void *)xchg);
+
+	UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
+		  "[info]LOGIN: RFF_ID feature 0x%x(10:TGT,20:INI,30:COM) send %s. Port(0x%x_0x%x)--->pstRPortid(0x%x) rport(0x%p) OX_ID(0x%x)",
+		  v_lport->options, (ret != RETURN_OK) ? "failed" : "succeed",
+		  v_lport->port_id, v_lport->nport_id,
+		  v_rport->nport_id, v_rport, ox_id);
+
+	UNF_REFERNCE_VAR(ox_id);
+	return ret;
+}
+
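The function that follows implements the patch's N2N (point-to-point) login rule: after FLOGI completes, the port with the numerically larger WWPN becomes master and originates PLOGI, using the 0xEF/0xD6 well-known address pair noted in the code comments. A minimal illustrative sketch (hypothetical names, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    #define P2P_LOCAL_ID  0xEFu  /* UNF_P2P_LOCAL_NPORT_ID per the comments  */
    #define P2P_REMOTE_ID 0xD6u  /* UNF_P2P_REMOTE_NPORT_ID per the comments */

    static void p2p_after_flogi(uint64_t local_wwpn, uint64_t remote_wwpn)
    {
        if (local_wwpn > remote_wwpn)
            /* master: claim 0xEF, address the peer as 0xD6, send PLOGI */
            printf("master: local N_Port_ID 0x%x, peer 0x%x, send PLOGI\n",
                   P2P_LOCAL_ID, P2P_REMOTE_ID);
        else
            /* slave: do nothing and wait for the peer's PLOGI */
            printf("slave: wait for PLOGI from the peer\n");
    }

    int main(void)
    {
        p2p_after_flogi(0x2100000000000002ULL, 0x2100000000000001ULL);
        p2p_after_flogi(0x2100000000000001ULL, 0x2100000000000002ULL);
        return 0;
    }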
+static void unf_login_with_rport_in_n2n(struct unf_lport_s *v_lport,
+					unsigned long long v_remote_port_name,
+					unsigned long long v_remote_node_name)
+{
+	/*
+	 * Called in P2P mode when:
+	 * 1. FLOGI ACC is received, or
+	 * 2. FLOGI ACC was sent successfully
+	 *
+	 * Compare WWNs: the larger one is master and sends PLOGI
+	 */
+	struct unf_lport_s *lport = v_lport;
+	struct unf_rport_s *rport = NULL;
+	unsigned long lport_flag = 0;
+	unsigned long rport_flag = 0;
+	unsigned long long port_name = 0;
+	unsigned long long node_name = 0;
+	unsigned int ret = RETURN_OK;
+
+	UNF_CHECK_VALID(0x3539, UNF_TRUE, v_lport, return);
+
+	spin_lock_irqsave(&lport->lport_state_lock, lport_flag);
+	/* LPort: FLOGI_WAIT --> READY */
+	unf_lport_stat_ma(lport, UNF_EVENT_LPORT_READY);
+	spin_unlock_irqrestore(&lport->lport_state_lock, lport_flag);
+
+	port_name = v_remote_port_name;
+	node_name = v_remote_node_name;
+
+	if (lport->port_name > port_name) {
+		/* Master case: send PLOGI */
+		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
+			  "[info]Port(0x%x)'s WWN(0x%llx) is larger than rport(0x%llx), should be master",
+			  lport->port_id, lport->port_name, port_name);
+
+		/* Update N_Port_ID now: 0xEF */
+		lport->nport_id = UNF_P2P_LOCAL_NPORT_ID;
+
+		rport = unf_find_valid_rport(v_lport, port_name,
+					     UNF_P2P_REMOTE_NPORT_ID); // 0xD6
+		rport = unf_get_safe_rport(v_lport, rport,
+					   UNF_RPORT_REUSE_ONLY,
+					   UNF_P2P_REMOTE_NPORT_ID);
+		if (rport) {
+			rport->node_name = node_name;
+			rport->port_name = port_name;
+			rport->nport_id = UNF_P2P_REMOTE_NPORT_ID; // 0xD6
+			rport->local_nport_id = UNF_P2P_LOCAL_NPORT_ID; // 0xEF
+
+			spin_lock_irqsave(&rport->rport_state_lock,
+					  rport_flag);
+			if ((rport->rp_state == UNF_RPORT_ST_PLOGI_WAIT) ||
+			    (rport->rp_state == UNF_RPORT_ST_PRLI_WAIT) ||
+			    (rport->rp_state == UNF_RPORT_ST_READY)) {
+				UNF_TRACE(UNF_EVTLOG_DRIVER_INFO,
+					  UNF_LOG_LOGIN_ATT,
+					  UNF_MAJOR,
+					  "[info]LOGIN: Port(0x%x) Rport(0x%x) has sent PLOGI or PRLI with state(0x%x)",
+					  lport->port_id, rport->nport_id,
+					  rport->rp_state);
+
+				spin_unlock_irqrestore(&rport->rport_state_lock,
+						       rport_flag);
+				return;
+			}
+			/* Update R_Port state: PLOGI_WAIT */
+			unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI);
+			spin_unlock_irqrestore(&rport->rport_state_lock,
+					       rport_flag);
+
+			/* P2P with master: Start to Send PLOGI */
+			ret = unf_send_plogi(lport, rport);
+			if (ret != RETURN_OK) {
+				UNF_TRACE(UNF_EVTLOG_DRIVER_WARN,
+					  UNF_LOG_LOGIN_ATT,
+					  UNF_WARN,
+					  "[warn]LOGIN: Port(0x%x) with WWN(0x%llx) send PLOGI to(0x%llx) failed",
+					  lport->port_id, lport->port_name,
+					  port_name);
+
+				unf_rport_error_recovery(rport);
+			}
+		} else {
+			/* Get/Alloc R_Port failed */
+			UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT,
+				  UNF_WARN,
+				  "[warn]Port(0x%x) with WWN(0x%llx) allocate RPort(ID:0x%x,WWPN:0x%llx) failed",
+				  lport->port_id, lport->port_name,
+				  UNF_P2P_REMOTE_NPORT_ID, port_name);
+		}
+	} else {
+		/* Slave case: L_Port's Port Name is smaller than R_Port's */
+		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT,
+			  UNF_MAJOR,
+			  "[info]Port(0x%x) with WWN(0x%llx) is smaller than rport(0x%llx), do nothing",
+			  lport->port_id, lport->port_name, port_name);
+	}
+}
+
+static void unf_flogi_acc_ob_callback(struct unf_xchg_s *v_xchg)
+{
+	/* Callback for sending FLOGI ACC succeeded */
+	struct unf_lport_s *lport = NULL;
+	struct unf_rport_s *rport = NULL;
+	unsigned long flags = 0;
+	unsigned long long port_name = 0;
+	unsigned long long node_name = 0;
+
+	UNF_CHECK_VALID(0x3457, UNF_TRUE, v_xchg, return);
+	UNF_CHECK_VALID(0x3458, UNF_TRUE, v_xchg->lport, return);
+	UNF_CHECK_VALID(0x3459, UNF_TRUE, v_xchg->rport, return);
+
+	spin_lock_irqsave(&v_xchg->xchg_state_lock, flags);
+	lport = v_xchg->lport;
+	rport = v_xchg->rport;
+
spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags); + + spin_lock_irqsave(&rport->rport_state_lock, flags); + port_name = rport->port_name; + node_name = rport->node_name; + + /* Swap case: Set WWPN & WWNN with zero */ + rport->port_name = 0; + rport->node_name = 0; + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + /* Enter PLOGI stage: after send FLOGI ACC succeed */ + unf_login_with_rport_in_n2n(lport, port_name, node_name); +} + +unsigned int unf_send_flogi_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + struct unf_flogi_payload_s *flogi_acc_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + UNF_CHECK_VALID(0x3393, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3394, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3395, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + v_xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_FLOGI); + + v_xchg->did = 0; /* D_ID must be 0 */ + v_xchg->sid = UNF_FC_FID_FLOGI; /* S_ID must be 0xfffffe */ + v_xchg->oid = v_xchg->sid; + v_xchg->pfn_callback = NULL; + v_xchg->lport = v_lport; + v_xchg->rport = v_rport; + /* call back for sending FLOGI response */ + v_xchg->pfn_ob_callback = unf_flogi_acc_ob_callback; + fc_entry = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_REG_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, v_xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + unf_fill_package(&pkg, v_xchg, v_rport); + + /* Fill FLOGI ACC payload */ + memset(fc_entry, 0, sizeof(union unf_sfs_u)); + flogi_acc_pld = &fc_entry->flogi_acc.flogi_payload; + flogi_acc_pld->cmnd = (UNF_ELS_CMND_ACC); + unf_fill_flogi_pld(flogi_acc_pld, v_lport); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + + /* Send FLOGI ACC to remote port */ + ret = unf_els_cmnd_send(v_lport, &pkg, v_xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)v_xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_REG_ATT, UNF_MAJOR, + "[info]LOGIN: FLOGI ACC send %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id, rx_id); + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static void unf_fill_plogi_acc_pld(struct unf_plogi_payload_s *v_plogi_acc_pld, + struct unf_lport_s *v_lport) +{ + struct unf_lgn_parms_s *login_parms = NULL; + + UNF_CHECK_VALID(0x3396, UNF_TRUE, v_plogi_acc_pld, return); + UNF_CHECK_VALID(0x3397, UNF_TRUE, v_lport, return); + + v_plogi_acc_pld->cmnd = (UNF_ELS_CMND_ACC); + login_parms = &v_plogi_acc_pld->parms; + + if ((v_lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC) || + (v_lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT)) { + login_parms->co_parms.bb_credit = + unf_low_level_bb_credit(v_lport); + login_parms->co_parms.alternate_bb_credit_mgmt = + UNF_BBCREDIT_MANAGE_NFPORT; /* 0 */ + login_parms->co_parms.bb_scn = + (v_lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC) ? 
+		    0 : unf_low_level_bbscn(v_lport);
+	} else {
+		login_parms->co_parms.bb_credit = UNF_BBCREDIT_LPORT;
+		login_parms->co_parms.alternate_bb_credit_mgmt =
+		    UNF_BBCREDIT_MANAGE_LPORT; /* 1 */
+	}
+
+	login_parms->co_parms.lowest_version = UNF_PLOGI_VERSION_LOWER;
+	login_parms->co_parms.highest_version = UNF_PLOGI_VERSION_UPPER;
+	login_parms->co_parms.continuously_increasing =
+	    UNF_CONTIN_INCREASE_SUPPORT;
+	login_parms->co_parms.bb_receive_data_field_size =
+	    v_lport->max_frame_size;
+	login_parms->co_parms.nport_total_concurrent_sequences =
+	    UNF_PLOGI_CONCURRENT_SEQ;
+	login_parms->co_parms.relative_offset = (UNF_PLOGI_RO_CATEGORY);
+	login_parms->co_parms.e_d_tov = (v_lport->ed_tov);
+	login_parms->cl_parms[2].valid = UNF_CLASS_VALID; /* class-3 */
+	login_parms->cl_parms[2].received_data_field_size =
+	    v_lport->max_frame_size;
+	login_parms->cl_parms[2].concurrent_sequences =
+	    UNF_PLOGI_CONCURRENT_SEQ;
+	login_parms->cl_parms[2].open_sequences_per_exchange =
+	    UNF_PLOGI_SEQ_PER_XCHG;
+	login_parms->high_node_name =
+	    UNF_GET_NAME_HIGH_WORD(v_lport->node_name);
+	login_parms->low_node_name =
+	    UNF_GET_NAME_LOW_WORD(v_lport->node_name);
+	login_parms->high_port_name =
+	    UNF_GET_NAME_HIGH_WORD(v_lport->port_name);
+	login_parms->low_port_name =
+	    UNF_GET_NAME_LOW_WORD(v_lport->port_name);
+
+	UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id,
+			    v_plogi_acc_pld,
+			    sizeof(struct unf_plogi_payload_s));
+}
+
+static void unf_schedule_open_work(struct unf_lport_s *v_lport,
+				   struct unf_rport_s *v_rport)
+{
+	/* Used when the L_Port is TGT-only or the R_Port is INI-only */
+	struct unf_lport_s *lport = v_lport;
+	struct unf_rport_s *rport = v_rport;
+	unsigned long delay = 0;
+	unsigned long flag = 0;
+	unsigned int ret = 0;
+	unsigned int port_feature = INVALID_VALUE32;
+
+	UNF_CHECK_VALID(0x3452, UNF_TRUE, v_lport, return);
+	UNF_CHECK_VALID(0x3453, UNF_TRUE, v_rport, return);
+
+	delay = (unsigned long)lport->ed_tov;
+	port_feature = rport->options & UNF_PORT_MODE_BOTH;
+
+	if ((lport->options == UNF_PORT_MODE_TGT) ||
+	    (port_feature == UNF_PORT_MODE_INI)) {
+		spin_lock_irqsave(&rport->rport_state_lock, flag);
+
+		ret = unf_rport_ref_inc(rport);
+		if (ret != RETURN_OK) {
+			UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT,
+				  UNF_WARN,
+				  "[warn]Port(0x%x_0x%x) RPort(0x%x) abnormal, no need to open",
+				  lport->port_id, lport->nport_id,
+				  rport->nport_id);
+
+			spin_unlock_irqrestore(&rport->rport_state_lock, flag);
+			return;
+		}
+
+		/* Delayed work pending check */
+		if (delayed_work_pending(&rport->open_work)) {
+			UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT,
+				  UNF_WARN,
+				  "[warn]Port(0x%x_0x%x) RPort(0x%x) open work is running, no need to re-open",
+				  lport->port_id, lport->nport_id,
+				  rport->nport_id);
+
+			spin_unlock_irqrestore(&rport->rport_state_lock, flag);
+			unf_rport_ref_dec(rport);
+			return;
+		}
+
+		/* start open work */
+		if (queue_delayed_work(
+		    unf_work_queue,
+		    &rport->open_work,
+		    (unsigned long)
+		    msecs_to_jiffies((unsigned int)delay))) {
+			UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT,
+				  UNF_MAJOR,
+				  "[info]Port(0x%x_0x%x) RPort(0x%x) start open work",
+				  lport->port_id, lport->nport_id,
+				  rport->nport_id);
+
+			(void)unf_rport_ref_inc(rport);
+		}
+		spin_unlock_irqrestore(&rport->rport_state_lock, flag);
+
+		unf_rport_ref_dec(rport);
+	}
+}
+
+static void unf_plogi_acc_ob_callback(struct unf_xchg_s *v_xchg)
+{
+	struct unf_lport_s *lport = NULL;
+	struct unf_rport_s *rport = NULL;
+	unsigned long flags = 0;
+
+	UNF_CHECK_VALID(0x3454, UNF_TRUE, v_xchg, return);
+
+	spin_lock_irqsave(&v_xchg->xchg_state_lock, flags);
+	lport = v_xchg->lport;
+	rport = v_xchg->rport;
+	spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags);
+
+	UNF_CHECK_VALID(0x3455, UNF_TRUE, lport, return);
+	UNF_CHECK_VALID(0x3456, UNF_TRUE, rport, return);
+
+	/*
+	 * 1. According to FC-LS 4.2.7.1:
+	 * after RCVD PLOGI or sending PLOGI ACC, open exchanges
+	 * need to be terminated
+	 */
+	unf_cm_xchg_mgr_abort_io_by_id(lport, rport, rport->nport_id,
+				       lport->nport_id, 0);
+
+	/* 2. Sending PLOGI ACC failed */
+	if (v_xchg->ob_callback_sts != UNF_IO_SUCCESS) {
+		/* Do R_Port recovery */
+		unf_rport_error_recovery(rport);
+
+		/* Not critical: only needed when the L_Port is TGT-only
+		 * or the R_Port is INI-only
+		 */
+		unf_schedule_open_work(lport, rport);
+
+		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN,
+			  UNF_LOG_LOGIN_ATT, UNF_WARN,
+			  "[warn]LOGIN: Port(0x%x_0x%x_0x%x) send PLOGI ACC failed(0x%x) with RPort(0x%x) feature(0x%x)",
+			  lport->port_id, lport->nport_id,
+			  lport->options, v_xchg->ob_callback_sts,
+			  rport->nport_id, rport->options);
+
+		/* NOTE: return */
+		return;
+	}
+
+	/* 3. Private Loop: check whether PRLI needs to be sent */
+	spin_lock_irqsave(&rport->rport_state_lock, flags);
+	if ((lport->en_act_topo == UNF_ACT_TOP_PRIVATE_LOOP) &&
+	    ((rport->rp_state == UNF_RPORT_ST_PRLI_WAIT) ||
+	     (rport->rp_state == UNF_RPORT_ST_READY))) {
+		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT,
+			  UNF_MAJOR,
+			  "[info]Port(0x%x_0x%x) RPort(0x%x) with State(0x%x) return directly",
+			  lport->port_id, lport->nport_id,
+			  rport->nport_id, rport->rp_state);
+
+		/* Do nothing */
+		spin_unlock_irqrestore(&rport->rport_state_lock, flags);
+		return;
+	}
+	unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PRLI); // PRLI_WAIT
+	spin_unlock_irqrestore(&rport->rport_state_lock, flags);
+
+	/* 4. Set Port Feature with BOTH: cancel */
+	if ((rport->options == UNF_PORT_MODE_UNKNOWN) &&
+	    (rport->port_name != INVALID_WWPN))
+		rport->options = unf_get_port_feature(rport->port_name);
+
+	/*
+	 * 5. Check whether PRLI needs to be sent with a delay
+	 * Called on RCVD PLOGI ACC or after PLOGI ACC was sent successfully
+	 */
+	unf_check_rport_need_delay_prli(lport, rport, rport->options);
+
+	/* 6. Not critical: only needed when the L_Port is TGT-only
+	 * or the R_Port is INI-only
+	 */
+	unf_schedule_open_work(lport, rport);
+
+	UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
+		  "[info]LOGIN: Port(0x%x_0x%x_0x%x) send PLOGI ACC succeed with RPort(0x%x) feature(0x%x)",
+		  lport->port_id, lport->nport_id, lport->options,
+		  rport->nport_id, rport->options);
+}
+
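unf_schedule_open_work() only arms the delayed open timer when the local port is target-only or the remote port is initiator-only, i.e. when neither side would otherwise originate PRLI. A runnable restatement of that predicate; the 0x1/0x2/0x3 mode encodings are an assumption taken from the driver's "10:TGT,20:INI,30:COM" log string:

    #include <assert.h>
    #include <stdbool.h>

    #define MODE_TGT  0x1u
    #define MODE_INI  0x2u
    #define MODE_BOTH (MODE_TGT | MODE_INI)

    /* mirrors: lport->options == TGT || (rport->options & BOTH) == INI */
    static bool need_open_work(unsigned int lport_opts, unsigned int rport_opts)
    {
        return lport_opts == MODE_TGT ||
               (rport_opts & MODE_BOTH) == MODE_INI;
    }

    int main(void)
    {
        assert(need_open_work(MODE_TGT,  MODE_BOTH)); /* local is TGT-only  */
        assert(need_open_work(MODE_BOTH, MODE_INI));  /* remote is INI-only */
        assert(!need_open_work(MODE_INI, MODE_BOTH)); /* normal INI -> TGT  */
        return 0;
    }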
+unsigned int unf_send_plogi_acc(struct unf_lport_s *v_lport,
+				struct unf_rport_s *v_rport,
+				struct unf_xchg_s *v_xchg)
+{
+	struct unf_plogi_payload_s *plogi_acc_pld = NULL;
+	union unf_sfs_u *fc_entry = NULL;
+	unsigned int ret = UNF_RETURN_ERROR;
+	struct unf_frame_pkg_s pkg = { 0 };
+	unsigned short ox_id = 0;
+	unsigned short rx_id = 0;
+
+	UNF_REFERNCE_VAR(ox_id);
+	UNF_REFERNCE_VAR(rx_id);
+	UNF_CHECK_VALID(0x3398, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
+	UNF_CHECK_VALID(0x3399, UNF_TRUE, v_rport, return UNF_RETURN_ERROR);
+	UNF_CHECK_VALID(0x3400, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR);
+
+	v_xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_PLOGI);
+
+	v_xchg->did = v_rport->nport_id;
+	v_xchg->sid = v_lport->nport_id;
+	v_xchg->oid = v_xchg->sid;
+	v_xchg->pfn_callback = NULL;
+	v_xchg->lport = v_lport;
+	v_xchg->rport = v_rport;
+	/* callback for sending PLOGI ACC */
+	v_xchg->pfn_ob_callback = unf_plogi_acc_ob_callback;
+
+	unf_fill_package(&pkg, v_xchg, v_rport);
+
+	fc_entry = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr;
+	if (!fc_entry) {
+		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
+			  "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)",
+			  v_lport->port_id, v_xchg->hot_pool_tag);
+
+		unf_cm_free_xchg(v_lport, v_xchg);
+		return UNF_RETURN_ERROR;
+	}
+
+	/* Fill PLOGI ACC payload */
+	memset(fc_entry, 0, sizeof(union unf_sfs_u));
+	plogi_acc_pld = &fc_entry->plogi_acc.payload;
+	unf_fill_plogi_acc_pld(plogi_acc_pld, v_lport);
+	ox_id = v_xchg->ox_id;
+	rx_id = v_xchg->rx_id;
+
+	/* Start to Send PLOGI ACC now */
+	ret = unf_els_cmnd_send(v_lport, &pkg, v_xchg);
+	if (ret != RETURN_OK)
+		/* NOTE: free exchange */
+		unf_cm_free_xchg((void *)v_lport, (void *)v_xchg);
+
+	if ((v_rport->nport_id < UNF_FC_FID_DOM_MGR) ||
+	    (v_lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT)) {
+		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
+			  "[info]LOGIN: PLOGI ACC send %s. Port(0x%x_0x%x_0x%llx)--->rport(0x%x_0x%llx) with OX_ID(0x%x) RX_ID(0x%x)",
+			  (ret != RETURN_OK) ?
"failed" : "succeed", + v_lport->port_id, v_lport->nport_id, + v_lport->port_name, + v_rport->nport_id, v_rport->port_name, + ox_id, rx_id); + } + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static void unf_fill_rjt_pld(struct unf_els_rjt_s *v_els_rjt, + unsigned int v_reason_code, + unsigned int v_reason_explanation) +{ + UNF_CHECK_VALID(0x3401, UNF_TRUE, v_els_rjt, return); + + v_els_rjt->cmnd = UNF_ELS_CMND_RJT; + v_els_rjt->reason_code = (v_reason_code | v_reason_explanation); +} + +static void unf_fill_prli_acc_pld(struct unf_pril_payload_s *v_prli_acc_pld, + struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + unsigned int port_mode = UNF_FC4_FRAME_PARM_3_TGT; + + UNF_CHECK_VALID(0x3402, UNF_TRUE, v_prli_acc_pld, return); + UNF_CHECK_VALID(0x3403, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3404, UNF_TRUE, v_rport, return); + + v_prli_acc_pld->cmnd = ( + UNF_ELS_CMND_ACC | + ((unsigned int)UNF_FC4_FRAME_PAGE_SIZE << + UNF_FC4_FRAME_PAGE_SIZE_SHIFT) | + ((unsigned int)(sizeof(struct unf_pril_payload_s) - + UNF_PRLI_SIRT_EXTRA_SIZE))); + + v_prli_acc_pld->parms[0] = (UNF_FC4_FRAME_PARM_0_FCP | + UNF_FC4_FRAME_PARM_0_I_PAIR | + UNF_FC4_FRAME_PARM_0_GOOD_RSP_CODE); + v_prli_acc_pld->parms[1] = UNF_NOT_MEANINGFUL; + v_prli_acc_pld->parms[2] = UNF_NOT_MEANINGFUL; + + /* About INI/TGT mode */ + if (v_rport->nport_id < UNF_FC_FID_DOM_MGR) + /* return INI (0x20): R_Port has TGT mode, + * L_Port has INI mode + */ + port_mode = UNF_FC4_FRAME_PARM_3_INI; + else + port_mode = v_lport->options; + + /* About Read xfer_rdy disable */ + v_prli_acc_pld->parms[3] = (UNF_FC4_FRAME_PARM_3_R_XFER_DIS | + port_mode); /* 0x2 */ + + /* About Tape support */ + if (v_rport->tape_support_needed) { + v_prli_acc_pld->parms[3] |= + (UNF_FC4_FRAME_PARM_3_REC_SUPPORT | + UNF_FC4_FRAME_PARM_3_RETRY_SUPPORT | + UNF_FC4_FRAME_PARM_3_TASK_RETRY_ID_SUPPORT | + UNF_FC4_FRAME_PARM_3_CONF_ALLOW); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "PRLI ACC tape support"); + } + + /* About confirm */ + if (v_lport->low_level_func.lport_cfg_items.fcp_conf == UNF_TRUE) + /* 0x80 */ + v_prli_acc_pld->parms[3] |= UNF_FC4_FRAME_PARM_3_CONF_ALLOW; + + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, + v_prli_acc_pld, sizeof(struct unf_pril_payload_s)); +} + +static void unf_prli_acc_ob_callback(struct unf_xchg_s *v_xchg) +{ + /* Report R_Port scsi Link Up */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + unsigned long flags = 0; + enum unf_rport_login_state_e rport_state = UNF_RPORT_ST_INIT; + + UNF_CHECK_VALID(0x3449, UNF_TRUE, v_xchg, return); + lport = v_xchg->lport; + rport = v_xchg->rport; + UNF_CHECK_VALID(0x3450, UNF_TRUE, lport, return); + UNF_CHECK_VALID(0x3451, UNF_TRUE, rport, return); + + /* Update & Report Link Up */ + spin_lock_irqsave(&rport->rport_state_lock, flags); + unf_rport_state_ma(rport, UNF_EVENT_RPORT_READY); // READY + rport_state = rport->rp_state; + if (rport->nport_id < UNF_FC_FID_DOM_MGR) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_KEVENT, + "[event]LOGIN: Port(0x%x) RPort(0x%x) state(0x%x) WWN(0x%llx) prliacc", + lport->port_id, rport->nport_id, + rport->rp_state, rport->port_name); + } + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + if (rport_state == UNF_RPORT_ST_READY) { + rport->logo_retries = 0; + unf_update_lport_state_by_linkup_event(lport, rport, + rport->options); + } +} + +unsigned int unf_send_prli_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + 
struct unf_xchg_s *v_xchg)
+{
+	struct unf_pril_payload_s *prli_acc_pld = NULL;
+	union unf_sfs_u *fc_entry = NULL;
+	unsigned int ret = UNF_RETURN_ERROR;
+	struct unf_frame_pkg_s pkg = { 0 };
+	unsigned short ox_id = 0;
+	unsigned short rx_id = 0;
+
+	UNF_REFERNCE_VAR(ox_id);
+	UNF_REFERNCE_VAR(rx_id);
+	UNF_CHECK_VALID(0x3405, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
+	UNF_CHECK_VALID(0x3406, UNF_TRUE, v_rport, return UNF_RETURN_ERROR);
+	UNF_CHECK_VALID(0x3407, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR);
+
+	v_xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_PRLI);
+	v_xchg->did = v_rport->nport_id;
+	v_xchg->sid = v_lport->nport_id;
+	v_xchg->oid = v_xchg->sid;
+	v_xchg->lport = v_lport;
+	v_xchg->rport = v_rport;
+
+	v_xchg->pfn_callback = NULL;
+	/* callback when sending succeeds */
+	v_xchg->pfn_ob_callback = unf_prli_acc_ob_callback;
+
+	/* Fill common package */
+	unf_fill_package(&pkg, v_xchg, v_rport);
+
+	/* Get FC entry (allocated when the exchange is created) */
+	fc_entry = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr;
+	if (!fc_entry) {
+		UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
+			  "[err]Port(0x%x) entry can't be NULL with tag(0x%x)",
+			  v_lport->port_id, v_xchg->hot_pool_tag);
+
+		unf_cm_free_xchg(v_lport, v_xchg);
+		return UNF_RETURN_ERROR;
+	}
+
+	/* Fill PRLI payload */
+	memset(fc_entry, 0, sizeof(union unf_sfs_u));
+	prli_acc_pld = &fc_entry->prli_acc.payload;
+	unf_fill_prli_acc_pld(prli_acc_pld, v_lport, v_rport);
+	ox_id = v_xchg->ox_id;
+	rx_id = v_xchg->rx_id;
+
+	/* Send ELS (PRLI) RSP */
+	ret = unf_els_cmnd_send(v_lport, &pkg, v_xchg);
+	if (ret != RETURN_OK)
+		unf_cm_free_xchg((void *)v_lport, (void *)v_xchg);
+
+	if ((v_rport->nport_id < UNF_FC_FID_DOM_MGR) ||
+	    (v_lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT)) {
+		UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
+			  "[info]LOGIN: PRLI ACC send %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x) RX_ID(0x%x)",
+			  (ret != RETURN_OK) ?
"failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id, rx_id); + } + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static unsigned int unf_send_rec_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + /* Reserved */ + UNF_REFERNCE_VAR(v_lport); + UNF_REFERNCE_VAR(v_rport); + UNF_REFERNCE_VAR(v_xchg); + + unf_cm_free_xchg((void *)v_lport, (void *)v_xchg); + + return RETURN_OK; +} + +static void unf_rrq_acc_ob_callback(struct unf_xchg_s *v_xchg) +{ + UNF_CHECK_VALID(0x3408, UNF_TRUE, v_xchg, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]RRQ ACC Xchg(0x%p) tag(0x%x)", + v_xchg, v_xchg->hot_pool_tag); + + UNF_REFERNCE_VAR(v_xchg); +} + +static void unf_fill_els_acc_pld(struct unf_els_acc_s *v_els_acc_pld) +{ + UNF_CHECK_VALID(0x3420, UNF_TRUE, v_els_acc_pld, return); + + v_els_acc_pld->cmnd = UNF_ELS_CMND_ACC; +} + +static void unf_rscn_acc_ob_callback(struct unf_xchg_s *v_xchg) +{ + UNF_REFERNCE_VAR(v_xchg); +} + +static unsigned int unf_send_rscn_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + struct unf_els_acc_s *rscn_acc = NULL; + union unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + struct unf_frame_pkg_s pkg; + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + UNF_CHECK_VALID(0x3421, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3422, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3423, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + v_xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_RSCN); + v_xchg->did = v_rport->nport_id; + v_xchg->sid = v_lport->nport_id; + v_xchg->oid = v_xchg->sid; + v_xchg->lport = v_lport; + v_xchg->rport = v_rport; + + /* Set call back function */ + v_xchg->pfn_callback = NULL; + v_xchg->pfn_ob_callback = unf_rscn_acc_ob_callback; // do nothing + + unf_fill_package(&pkg, v_xchg, v_rport); + + fc_entry = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, v_xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + memset(fc_entry, 0, sizeof(union unf_sfs_u)); + rscn_acc = &fc_entry->els_acc; + unf_fill_els_acc_pld(rscn_acc); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + + ret = unf_els_cmnd_send(v_lport, &pkg, v_xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)v_xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: RSCN ACC send %s. Port(0x%x)--->rport(0x%x) with OXID(0x%x) RXID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id, rx_id); + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static void unf_logo_acc_ob_callback(struct unf_xchg_s *v_xchg) +{ + UNF_REFERNCE_VAR(v_xchg); +} + +unsigned int unf_send_logo_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + struct unf_els_acc_s *logo_acc = NULL; + union unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + struct unf_frame_pkg_s pkg; + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + UNF_CHECK_VALID(0x3424, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3425, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3426, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + + v_xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_LOGO); + v_xchg->did = v_rport->nport_id; + v_xchg->sid = v_lport->nport_id; + v_xchg->oid = v_xchg->sid; + v_xchg->lport = v_lport; + v_xchg->rport = v_rport; + v_xchg->pfn_callback = NULL; + v_xchg->pfn_ob_callback = unf_logo_acc_ob_callback; // do nothing + + unf_fill_package(&pkg, v_xchg, v_rport); + + fc_entry = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, v_xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + memset(fc_entry, 0, sizeof(union unf_sfs_u)); + logo_acc = &fc_entry->els_acc; + unf_fill_els_acc_pld(logo_acc); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + + ret = unf_els_cmnd_send(v_lport, &pkg, v_xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)v_xchg); + + if (v_rport->nport_id < UNF_FC_FID_DOM_MGR) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: LOGO ACC send %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id, rx_id); + } + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static unsigned int unf_send_rrq_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + struct unf_els_acc_s *rrq_acc = NULL; + union unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + struct unf_frame_pkg_s pkg = { 0 }; + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + UNF_CHECK_VALID(0x3427, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3428, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3429, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + v_xchg->did = v_rport->nport_id; + v_xchg->sid = v_lport->nport_id; + v_xchg->oid = v_xchg->sid; + v_xchg->lport = v_lport; + v_xchg->rport = v_rport; + v_xchg->pfn_callback = NULL; // do noting + + fc_entry = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, v_xchg->hot_pool_tag); + + return UNF_RETURN_ERROR; + } + + memset(fc_entry, 0, sizeof(union unf_sfs_u)); + rrq_acc = &fc_entry->els_acc; + v_xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_RRQ); + v_xchg->pfn_ob_callback = unf_rrq_acc_ob_callback; // do noting + unf_fill_els_acc_pld(rrq_acc); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + + unf_fill_package(&pkg, v_xchg, v_rport); + + ret = unf_els_cmnd_send(v_lport, &pkg, v_xchg); + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]RRQ ACC send %s. Port(0x%x)--->rport(0x%x) with Xchg(0x%p) OX_ID(0x%x) RX_ID(0x%x)", + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_rport->nport_id, v_xchg, ox_id, rx_id); + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static void unf_fill_pdisc_acc_pld(struct unf_plogi_payload_s *v_pdisc_acc_pld, + struct unf_lport_s *v_lport) +{ + struct unf_lgn_parms_s *login_parms = NULL; + + UNF_CHECK_VALID(0x3430, UNF_TRUE, v_pdisc_acc_pld, return); + UNF_CHECK_VALID(0x3431, UNF_TRUE, v_lport, return); + + v_pdisc_acc_pld->cmnd = UNF_ELS_CMND_ACC; + login_parms = &v_pdisc_acc_pld->parms; + + if ((v_lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC) || + (v_lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT)) { + login_parms->co_parms.bb_credit = + unf_low_level_bb_credit(v_lport); + login_parms->co_parms.alternate_bb_credit_mgmt = + UNF_BBCREDIT_MANAGE_NFPORT; + login_parms->co_parms.bb_scn = + (v_lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC) ? 
+ 0 : unf_low_level_bbscn(v_lport); + } else { + login_parms->co_parms.bb_credit = UNF_BBCREDIT_LPORT; + login_parms->co_parms.alternate_bb_credit_mgmt = + UNF_BBCREDIT_MANAGE_LPORT; + } + + login_parms->co_parms.lowest_version = UNF_PLOGI_VERSION_LOWER; + login_parms->co_parms.highest_version = UNF_PLOGI_VERSION_UPPER; + login_parms->co_parms.continuously_increasing = + UNF_CONTIN_INCREASE_SUPPORT; + login_parms->co_parms.bb_receive_data_field_size = + v_lport->max_frame_size; + login_parms->co_parms.nport_total_concurrent_sequences = + UNF_PLOGI_CONCURRENT_SEQ; + login_parms->co_parms.relative_offset = UNF_PLOGI_RO_CATEGORY; + login_parms->co_parms.e_d_tov = v_lport->ed_tov; + + login_parms->cl_parms[2].valid = UNF_CLASS_VALID; // class-3 + login_parms->cl_parms[2].received_data_field_size = + v_lport->max_frame_size; + login_parms->cl_parms[2].concurrent_sequences = + UNF_PLOGI_CONCURRENT_SEQ; + login_parms->cl_parms[2].open_sequences_per_exchange = + UNF_PLOGI_SEQ_PER_XCHG; + + login_parms->high_node_name = + UNF_GET_NAME_HIGH_WORD(v_lport->node_name); + login_parms->low_node_name = + UNF_GET_NAME_LOW_WORD(v_lport->node_name); + login_parms->high_port_name = + UNF_GET_NAME_HIGH_WORD(v_lport->port_name); + login_parms->low_port_name = + UNF_GET_NAME_LOW_WORD(v_lport->port_name); + + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, + v_pdisc_acc_pld, + sizeof(struct unf_plogi_payload_s)); +} + +static void unf_pdisc_acc_ob_callback(struct unf_xchg_s *v_xchg) +{ + UNF_REFERNCE_VAR(v_xchg); +} + +unsigned int unf_send_pdisc_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + struct unf_plogi_payload_s *pdisc_acc_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + struct unf_frame_pkg_s pkg; + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + UNF_CHECK_VALID(0x3432, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3433, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3434, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + + v_xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_PDISC); + v_xchg->did = v_rport->nport_id; + v_xchg->sid = v_lport->nport_id; + v_xchg->oid = v_xchg->sid; + v_xchg->lport = v_lport; + v_xchg->rport = v_rport; + + /* Set call back function */ + v_xchg->pfn_callback = NULL; + v_xchg->pfn_ob_callback = unf_pdisc_acc_ob_callback; // do nothing + + unf_fill_package(&pkg, v_xchg, v_rport); + + fc_entry = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, v_xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + memset(fc_entry, 0, sizeof(union unf_sfs_u)); + pdisc_acc_pld = &fc_entry->pdisc_acc.payload; + unf_fill_pdisc_acc_pld(pdisc_acc_pld, v_lport); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + + ret = unf_els_cmnd_send(v_lport, &pkg, v_xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)v_xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Send PDISC ACC %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id, rx_id); + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static void unf_fill_adisc_acc_pld(struct unf_adisc_payload_s *v_adisc_acc_pld, + struct unf_lport_s *v_lport) +{ + UNF_CHECK_VALID(0x3435, UNF_TRUE, v_adisc_acc_pld, return); + UNF_CHECK_VALID(0x3436, UNF_TRUE, v_lport, return); + + v_adisc_acc_pld->cmnd = (UNF_ELS_CMND_ACC); + + v_adisc_acc_pld->hard_address = (v_lport->nport_id & UNF_ALPA_MASK); + v_adisc_acc_pld->high_node_name = + UNF_GET_NAME_HIGH_WORD(v_lport->node_name); + v_adisc_acc_pld->low_node_name = + UNF_GET_NAME_LOW_WORD(v_lport->node_name); + v_adisc_acc_pld->high_port_name = + UNF_GET_NAME_HIGH_WORD(v_lport->port_name); + v_adisc_acc_pld->low_port_name = + UNF_GET_NAME_LOW_WORD(v_lport->port_name); + v_adisc_acc_pld->nport_id = v_lport->nport_id; + + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, + v_adisc_acc_pld, + sizeof(struct unf_adisc_payload_s)); +} + +static void unf_adisc_acc_ob_callback(struct unf_xchg_s *v_xchg) +{ + UNF_REFERNCE_VAR(v_xchg); +} + +static unsigned int unf_send_adisc_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + struct unf_adisc_payload_s *adisc_acc_pld = NULL; + union unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + UNF_CHECK_VALID(0x3437, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3438, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3439, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + v_xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_ADISC); + v_xchg->did = v_rport->nport_id; + v_xchg->sid = v_lport->nport_id; + v_xchg->oid = v_xchg->sid; + v_xchg->lport = v_lport; + v_xchg->rport = v_rport; + + v_xchg->pfn_callback = NULL; + v_xchg->pfn_ob_callback = unf_adisc_acc_ob_callback; // do nothing + + unf_fill_package(&pkg, v_xchg, v_rport); + fc_entry = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, v_xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + memset(fc_entry, 0, sizeof(union unf_sfs_u)); + adisc_acc_pld = &fc_entry->adisc_acc.adisc_payl; + unf_fill_adisc_acc_pld(adisc_acc_pld, v_lport); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + + ret = unf_els_cmnd_send(v_lport, &pkg, v_xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)v_xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Send ADISC ACC %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id, rx_id); + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static void unf_fill_prlo_acc_pld(struct unf_prli_prlo_s *v_prlo_acc, + struct unf_lport_s *v_lport) +{ + struct unf_pril_payload_s *prlo_acc_pld = NULL; + + UNF_CHECK_VALID(0x3440, UNF_TRUE, v_prlo_acc, return); + + prlo_acc_pld = &v_prlo_acc->payload; + prlo_acc_pld->cmnd = (UNF_ELS_CMND_ACC | + ((unsigned int)UNF_FC4_FRAME_PAGE_SIZE << + UNF_FC4_FRAME_PAGE_SIZE_SHIFT) | + ((unsigned int) + sizeof(struct unf_pril_payload_s))); + prlo_acc_pld->parms[0] = UNF_FC4_FRAME_PARM_0_FCP | + UNF_FC4_FRAME_PARM_0_GOOD_RSP_CODE; + prlo_acc_pld->parms[1] = 0; + prlo_acc_pld->parms[2] = 0; + prlo_acc_pld->parms[3] = 0; + + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, prlo_acc_pld, + sizeof(struct unf_pril_payload_s)); +} + +static unsigned int unf_send_prlo_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + struct unf_prli_prlo_s *prlo_acc = NULL; + union unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + struct unf_frame_pkg_s pkg; + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + UNF_CHECK_VALID(0x3441, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3442, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3443, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + + v_xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_PRLO); + v_xchg->did = v_rport->nport_id; + v_xchg->sid = v_lport->nport_id; + v_xchg->oid = v_xchg->sid; + v_xchg->lport = v_lport; + v_xchg->rport = v_rport; + + v_xchg->pfn_callback = NULL; // do nothing + v_xchg->pfn_ob_callback = NULL; // do nothing + + unf_fill_package(&pkg, v_xchg, v_rport); + + fc_entry = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, v_xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + memset(fc_entry, 0, sizeof(union unf_sfs_u)); + prlo_acc = &fc_entry->prlo_acc; + unf_fill_prlo_acc_pld(prlo_acc, v_lport); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + + ret = unf_els_cmnd_send(v_lport, &pkg, v_xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)v_xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Send PRLO ACC %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", + (ret != RETURN_OK) ? 
"failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id, rx_id); + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +unsigned int unf_send_abts(struct unf_lport_s *v_lport, + struct unf_xchg_s *v_xchg) +{ + struct unf_rport_s *rport = NULL; + unsigned int ret = UNF_RETURN_ERROR; + struct unf_frame_pkg_s pkg; + + UNF_CHECK_VALID(0x3444, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3445, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + rport = v_xchg->rport; + UNF_CHECK_VALID(0x3446, UNF_TRUE, rport, return UNF_RETURN_ERROR); + + /* set pkg info */ + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + pkg.type = UNF_PKG_BLS_REQ; + pkg.frame_head.csctl_sid = v_xchg->sid; + pkg.frame_head.rctl_did = v_xchg->did; + pkg.frame_head.oxid_rxid = + (unsigned int)v_xchg->ox_id << 16 | v_xchg->rx_id; + pkg.xchg_contex = v_xchg; + pkg.unf_cmnd_pload_bl.buffer_ptr = + (unsigned char *) + v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + + pkg.unf_cmnd_pload_bl.buf_dma_addr = + v_xchg->fcp_sfs_union.sfs_entry.sfs_buff_phy_addr; + pkg.private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = v_xchg->hot_pool_tag; + + UNF_SET_XCHG_ALLOC_TIME(&pkg, v_xchg); + UNF_SET_ABORT_INFO_IOTYPE(&pkg, v_xchg); + + pkg.private[PKG_PRIVATE_XCHG_RPORT_INDEX] = + v_xchg->private[PKG_PRIVATE_XCHG_RPORT_INDEX]; + + /* Send ABTS frame to target */ + ret = unf_bls_cmnd_send(v_lport, &pkg, v_xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]Port(0x%x_0x%x) send ABTS %s. Abort exch(0x%p) Cmdsn:0x%lx, tag(0x%x) iotype(0x%x)", + v_lport->port_id, v_lport->nport_id, + (ret == UNF_RETURN_ERROR) ? "failed" : "succeed", + v_xchg, (unsigned long)v_xchg->cmnd_sn, + v_xchg->hot_pool_tag, v_xchg->data_direction); + + UNF_REFERNCE_VAR(rport); + return ret; +} + +unsigned int unf_release_rport_res(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct unf_rport_info_s rport_info; + + UNF_CHECK_VALID(0x3447, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3448, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + memset(&rport_info, 0, sizeof(struct unf_rport_info_s)); + + rport_info.rport_index = v_rport->rport_index; + rport_info.nport_id = v_rport->nport_id; + rport_info.port_name = v_rport->port_name; + + /* 2. release R_Port(parent context/Session) resource */ + if (!v_lport->low_level_func.service_op.pfn_unf_release_rport_res) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) release rport resource function can't be NULL", + v_lport->port_id); + + return ret; + } + + ret = v_lport->low_level_func.service_op.pfn_unf_release_rport_res( + v_lport->fc_port, + &rport_info); + if (ret != RETURN_OK) + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) rport_index(0x%x, %p) send release session CMND failed", + v_lport->port_id, rport_info.rport_index, v_rport); + + return ret; +} + +static inline unsigned char unf_determin_bbscn(unsigned char local_bbscn, + unsigned char remote_bbscn) +{ + if ((remote_bbscn == 0) || (local_bbscn == 0)) + local_bbscn = 0; + else + local_bbscn = local_bbscn > remote_bbscn ? 
+static inline unsigned char unf_determin_bbscn(unsigned char local_bbscn,
+					       unsigned char remote_bbscn)
+{
+	if ((remote_bbscn == 0) || (local_bbscn == 0))
+		local_bbscn = 0;
+	else
+		local_bbscn = local_bbscn > remote_bbscn ?
+			      local_bbscn : remote_bbscn;
+
+	return local_bbscn;
+}
+
+static void unf_cfg_lowlevel_fabric_params(
+				struct unf_lport_s *v_lport,
+				struct unf_rport_s *v_rport,
+				struct unf_fabric_parms_s *v_login_parms)
+{
+	struct unf_port_login_parms_s login_co_parms = { 0 };
+	unsigned int remote_edtov = 0;
+	unsigned int ret = 0;
+	unsigned char remote_edtov_resolution = 0; /* 0:ms; 1:ns */
+
+	if (!v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_set)
+		return;
+
+	login_co_parms.remote_rttov_tag =
+	    (unsigned char)UNF_GET_RT_TOV_FROM_PARAMS(v_login_parms);
+	login_co_parms.remote_edtov_tag = 0;
+	login_co_parms.remote_bbcredit =
+	    (unsigned short)
+	    UNF_GET_BB_CREDIT_FROM_PARAMS(v_login_parms);
+	login_co_parms.compared_bbscn =
+	    (unsigned int)unf_determin_bbscn(
+		(unsigned char)
+		v_lport->low_level_func.lport_cfg_items.bb_scn,
+		(unsigned char)
+		UNF_GET_BB_SC_N_FROM_PARAMS(v_login_parms));
+
+	remote_edtov_resolution =
+	    (unsigned char)
+	    UNF_GET_E_D_TOV_RESOLUTION_FROM_PARAMS(v_login_parms);
+	remote_edtov = UNF_GET_E_D_TOV_FROM_PARAMS(v_login_parms);
+	login_co_parms.compared_edtov_val =
+	    remote_edtov_resolution ?
+	    (remote_edtov / 1000000) : remote_edtov;
+
+	login_co_parms.compared_ratov_val =
+	    UNF_GET_RA_TOV_FROM_PARAMS(v_login_parms);
+	login_co_parms.els_cmnd_code = ELS_FLOGI;
+
+	if (v_lport->en_act_topo & UNF_TOP_P2P_MASK) {
+		login_co_parms.en_act_topo =
+		    (v_login_parms->co_parms.n_port == UNF_F_PORT) ?
+		    UNF_ACT_TOP_P2P_FABRIC : UNF_ACT_TOP_P2P_DIRECT;
+	} else {
+		login_co_parms.en_act_topo = v_lport->en_act_topo;
+	}
+
+	ret = v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_set(
+	    (void *)v_lport->fc_port,
+	    UNF_PORT_CFG_UPDATE_FABRIC_PARAM,
+	    (void *)&login_co_parms);
+	if (ret != RETURN_OK)
+		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
+			  "[warn]Low level does not support fabric config");
+}
+
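The E_D_TOV handling above converts the peer's timeout value into milliseconds using the resolution flag from the common service parameters (0 = ms, 1 = ns, as the local comment notes). A runnable restatement of that conversion:

    #include <assert.h>

    /* resolution flag: 0 = value already in ms, 1 = value in ns */
    static unsigned int edtov_to_ms(unsigned int val, unsigned char resolution_ns)
    {
        return resolution_ns ? val / 1000000 : val;
    }

    int main(void)
    {
        assert(edtov_to_ms(2000, 0) == 2000);        /* 2000 ms stays 2000 ms */
        assert(edtov_to_ms(2000000000u, 1) == 2000); /* 2e9 ns -> 2000 ms */
        return 0;
    }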
+static unsigned int unf_check_flogi_params(
+				struct unf_lport_s *v_lport,
+				struct unf_rport_s *v_rport,
+				struct unf_fabric_parms_s *v_fabric_parms)
+{
+	unsigned int ret = RETURN_OK;
+
+	UNF_CHECK_VALID(0x3460, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
+	UNF_CHECK_VALID(0x3461, UNF_TRUE, v_rport, return UNF_RETURN_ERROR);
+	UNF_CHECK_VALID(0x3462, UNF_TRUE, v_fabric_parms,
+			return UNF_RETURN_ERROR);
+	UNF_REFERNCE_VAR(v_lport);
+	UNF_REFERNCE_VAR(v_rport);
+
+	if (v_fabric_parms->cl_parms[2].valid == UNF_CLASS_INVALID) {
+		/* Discard directly */
+		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT,
+			  UNF_WARN,
+			  "[warn]Port(0x%x) NPort_ID(0x%x) FLOGI does not support class3",
+			  v_lport->port_id, v_rport->nport_id);
+
+		return UNF_RETURN_ERROR;
+	}
+
+	return ret;
+}
+
+static void unf_save_fabric_params(struct unf_lport_s *v_lport,
+				   struct unf_rport_s *v_rport,
+				   struct unf_fabric_parms_s *v_fabric_parms)
+{
+	unsigned long long fabric_node_name = 0;
+
+	UNF_CHECK_VALID(0x3463, UNF_TRUE, v_lport, return);
+	UNF_CHECK_VALID(0x3464, UNF_TRUE, v_rport, return);
+	UNF_CHECK_VALID(0x3465, UNF_TRUE, v_fabric_parms, return);
+
+	UNF_REFERNCE_VAR(v_lport);
+	fabric_node_name = (unsigned long long)
+	    (((unsigned long long)
+	    (v_fabric_parms->high_node_name) << 32) |
+	    ((unsigned long long)
+	    (v_fabric_parms->low_node_name)));
+
+	/* The R_Port for 0xfffffe is used for FLOGI; no need to save its WWN */
+	if (v_fabric_parms->co_parms.bb_receive_data_field_size >
+	    UNF_MAX_FRAME_SIZE)
+		v_rport->max_frame_size = UNF_MAX_FRAME_SIZE; // 2112
+	else
+		v_rport->max_frame_size =
+		    v_fabric_parms->co_parms.bb_receive_data_field_size;
+
+	/* with Fabric attribute */
+	if (v_fabric_parms->co_parms.n_port == UNF_F_PORT) {
+		v_rport->ed_tov = v_fabric_parms->co_parms.e_d_tov;
+		v_rport->ra_tov = v_fabric_parms->co_parms.r_a_tov;
+		v_lport->ed_tov = v_fabric_parms->co_parms.e_d_tov;
+		v_lport->ra_tov = v_fabric_parms->co_parms.r_a_tov;
+		v_lport->rr_tov = UNF_CALC_LPORT_RRTOV(v_lport);
+		v_lport->fabric_node_name = fabric_node_name;
+	}
+
+	/* Configure info from FLOGI to chip */
+	unf_cfg_lowlevel_fabric_params(v_lport, v_rport, v_fabric_parms);
+
+	UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO,
+		  "[info]Port(0x%x) Rport(0x%x) login parameter: E_D_TOV = %u. LPort E_D_TOV = %u. fabric nodename: 0x%x%x",
+		  v_lport->port_id,
+		  v_rport->nport_id,
+		  (v_fabric_parms->co_parms.e_d_tov),
+		  v_lport->ed_tov,
+		  v_fabric_parms->high_node_name,
+		  v_fabric_parms->low_node_name);
+}
+
do nothing", + v_lport->port_id); + + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + /* Update R_Port info */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->port_name = wwpn; + rport->node_name = wwnn; + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Check RCVD FLOGI parameters: only for class-3 */ + ret = unf_check_flogi_params(v_lport, rport, fabric_login_parms); + if (ret != RETURN_OK) { + /* Discard directly */ + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + /* P2P fabric */ + unf_lport_update_topo(v_lport, UNF_ACT_TOP_P2P_DIRECT); + + /* Save fabric parameters */ + unf_save_fabric_params(v_lport, rport, fabric_login_parms); + + /* Send ACC for FLOGI */ + ret = unf_send_flogi_acc(v_lport, rport, v_xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) send FLOGI ACC failed and do recover", + v_lport->port_id); + + /* Do L_Port recovery */ + unf_lport_error_recovery(v_lport); + } + + return ret; +} + +static void unf_cfg_lowlevel_port_params(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_lgn_parms_s *v_login_parms, + unsigned int v_cmd_type) +{ + struct unf_port_login_parms_s login_co_parms = { 0 }; + unsigned int ret = 0; + + if (!v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_set) + return; + + login_co_parms.rport_index = v_rport->rport_index; + login_co_parms.seq_cnt = 0; + login_co_parms.ed_tov = 0; + login_co_parms.ed_tov_timer_val = v_lport->ed_tov; + login_co_parms.tx_mfs = v_rport->max_frame_size; + + login_co_parms.remote_rttov_tag = + (unsigned char)UNF_GET_RT_TOV_FROM_PARAMS(v_login_parms); + login_co_parms.remote_edtov_tag = 0; + login_co_parms.remote_bbcredit = + (unsigned short)UNF_GET_BB_CREDIT_FROM_PARAMS(v_login_parms); + login_co_parms.els_cmnd_code = v_cmd_type; + + if (v_lport->en_act_topo == UNF_ACT_TOP_PRIVATE_LOOP) { + login_co_parms.compared_bbscn = 0; + } else { + login_co_parms.compared_bbscn = + (unsigned int)unf_determin_bbscn( + (unsigned char) + v_lport->low_level_func.lport_cfg_items.bb_scn, + (unsigned char) + UNF_GET_BB_SC_N_FROM_PARAMS(v_login_parms)); + } + + login_co_parms.compared_edtov_val = v_lport->ed_tov; + login_co_parms.compared_ratov_val = v_lport->ra_tov; + + ret = v_lport->low_level_func.port_mgr_op.pfn_ll_port_config_set( + (void *)v_lport->fc_port, + UNF_PORT_CFG_UPDATE_PLOGI_PARAM, + (void *)&login_co_parms); + if (ret != RETURN_OK) + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) Lowlevel unsupport port config", + v_lport->port_id); +} + +unsigned int unf_check_plogi_params(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_lgn_parms_s *v_login_parms) +{ + unsigned int ret = RETURN_OK; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3468, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3469, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3470, UNF_TRUE, v_login_parms, + return UNF_RETURN_ERROR); + + /* Parameters check: Class-type */ + if ((v_login_parms->cl_parms[2].valid == UNF_CLASS_INVALID) || + (v_login_parms->co_parms.bb_receive_data_field_size == 0)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) RPort N_Port_ID(0x%x) with PLOGI parameters invalid: class3(%u), BBReceiveDataFieldSize(0x%x), send LOGO", + v_lport->port_id, v_rport->nport_id, + v_login_parms->cl_parms[2].valid, + 
+static void unf_save_plogi_params(struct unf_lport_s *v_lport,
+				  struct unf_rport_s *v_rport,
+				  struct unf_lgn_parms_s *v_login_parms,
+				  unsigned int v_cmd_code)
+{
+/* Extra delay (ms): the side with the smaller WWPN delays PRLI in COM mode */
+#define UNF_DELAY_TIME 100
+
+	unsigned long long wwpn = INVALID_VALUE64;
+	unsigned long long wwnn = INVALID_VALUE64;
+	unsigned int ed_tov = 0;
+	unsigned int remote_edtov = 0;
+
+	if (v_login_parms->co_parms.bb_receive_data_field_size >
+	    UNF_MAX_FRAME_SIZE)
+		v_rport->max_frame_size = UNF_MAX_FRAME_SIZE; // 2112
+	else
+		v_rport->max_frame_size =
+		    v_login_parms->co_parms.bb_receive_data_field_size;
+
+	wwnn = (unsigned long long)
+	    (((unsigned long long)
+	    (v_login_parms->high_node_name) << 32) |
+	    ((unsigned long long)v_login_parms->low_node_name));
+	wwpn = (unsigned long long)
+	    (((unsigned long long)
+	    (v_login_parms->high_port_name) << 32) |
+	    ((unsigned long long)v_login_parms->low_port_name));
+
+	remote_edtov = v_login_parms->co_parms.e_d_tov;
+	ed_tov = v_login_parms->co_parms.e_d_tov_resolution ?
+	    (remote_edtov / 1000000) : remote_edtov;
+
+	v_rport->port_name = wwpn;
+	v_rport->node_name = wwnn;
+	v_rport->local_nport_id = v_lport->nport_id;
+
+	if ((v_lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT) ||
+	    (v_lport->en_act_topo == UNF_ACT_TOP_PRIVATE_LOOP)) {
+		/* P2P or Private Loop */
+		v_lport->ed_tov = (v_lport->ed_tov > ed_tov) ?
+		    v_lport->ed_tov : ed_tov;
+		v_lport->ra_tov = 2 * v_lport->ed_tov; // 2 * E_D_TOV
+		v_lport->rr_tov = UNF_CALC_LPORT_RRTOV(v_lport);
+
+		if (ed_tov != 0)
+			v_rport->ed_tov = ed_tov;
+		else
+			v_rport->ed_tov = UNF_DEFAULT_EDTOV;
+	} else {
+		/* SAN: E_D_TOV updated by FLOGI */
+		v_rport->ed_tov = v_lport->ed_tov;
+	}
+
+	/* Smaller WWPN: delay sending PRLI */
+	if (v_rport->port_name > v_lport->port_name)
+		v_rport->ed_tov += UNF_DELAY_TIME; // 100ms
+
+	/* Configure port parameters to low level (chip) */
+	unf_cfg_lowlevel_port_params(v_lport, v_rport, v_login_parms,
+				     v_cmd_code);
+
+	UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO,
+		  "[info]Port(0x%x) RPort(0x%x) with WWPN(0x%llx) WWNN(0x%llx) login: ED_TOV(%u) Port: ED_TOV(%u)",
+		  v_lport->port_id,
+		  v_rport->nport_id,
+		  v_rport->port_name, v_rport->node_name,
+		  ed_tov,
+		  v_lport->ed_tov);
+}
+
+static int unf_check_bbscn_is_enabled(unsigned char local_bbscn,
+				      unsigned char remote_bbscn)
+{
+	return unf_determin_bbscn(local_bbscn, remote_bbscn) ?
+	       UNF_TRUE : UNF_FALSE;
+}
+
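unf_save_plogi_params() above gives the side with the smaller WWPN an extra UNF_DELAY_TIME (100 ms) on its PRLI timer, so both ends of a point-to-point link do not originate PRLI at the same moment. A standalone sketch of the effective timer computation (hypothetical helper, not driver code):

    #include <assert.h>
    #include <stdint.h>

    #define DELAY_MS 100u /* mirrors UNF_DELAY_TIME above */

    static unsigned int prli_edtov(unsigned int ed_tov_ms,
                                   uint64_t local_wwpn, uint64_t remote_wwpn)
    {
        /* the smaller-WWPN side waits an extra 100 ms before PRLI */
        return remote_wwpn > local_wwpn ? ed_tov_ms + DELAY_MS : ed_tov_ms;
    }

    int main(void)
    {
        assert(prli_edtov(2000, 1, 2) == 2100); /* we are smaller: delay */
        assert(prli_edtov(2000, 2, 1) == 2000); /* we are larger: no delay */
        return 0;
    }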
+static unsigned int unf_irq_process_switch_2_thread(void *v_lport,
+						    struct unf_xchg_s *v_xchg,
+						    unf_evt_task v_evt_task)
+{
+	struct unf_cm_event_report *event = NULL;
+	struct unf_xchg_s *xchg = NULL;
+	unsigned int ret = 0;
+	struct unf_lport_s *lport = NULL;
+
+	UNF_CHECK_VALID(0x1996, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
+	UNF_CHECK_VALID(0x1996, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR);
+	lport = v_lport;
+	xchg = v_xchg;
+
+	if (unlikely((!lport->event_mgr.pfn_unf_get_free_event) ||
+		     (!lport->event_mgr.pfn_unf_post_event) ||
+		     (!lport->event_mgr.pfn_unf_release_event))) {
+		UNF_TRACE(0x2065, UNF_LOG_LOGIN_ATT, UNF_WARN,
+			  "[warn]Port(0x%x) event function is NULL",
+			  lport->port_id);
+
+		return UNF_RETURN_ERROR;
+	}
+
+	ret = unf_xchg_ref_inc(xchg, SFS_RESPONSE);
+	UNF_CHECK_VALID(0x3343, UNF_TRUE, (ret == RETURN_OK),
+			return UNF_RETURN_ERROR);
+
+	event = lport->event_mgr.pfn_unf_get_free_event((void *)v_lport);
+	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, event,
+			return UNF_RETURN_ERROR);
+
+	event->lport = lport;
+	event->event_asy_flag = UNF_EVENT_ASYN;
+	event->pfn_unf_event_task = v_evt_task;
+	event->para_in = v_xchg;
+	lport->event_mgr.pfn_unf_post_event(lport, event);
+
+	UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
+		  "[info]Port(0x%x) start to switch thread process now",
+		  lport->port_id);
+
+	return ret;
+}
+
+static unsigned int unf_plogi_handler_com_process(struct unf_xchg_s *v_xchg)
+{
+	struct unf_xchg_s *xchg = v_xchg;
+	struct unf_lport_s *lport = NULL;
+	struct unf_rport_s *rport = NULL;
+	struct unf_plogi_pdisc_s *plogi_frame = NULL;
+	struct unf_lgn_parms_s *login_parms = NULL;
+	unsigned int ret = UNF_RETURN_ERROR;
+	unsigned long flag = 0;
+
+	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, xchg,
+			return UNF_RETURN_ERROR);
+	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, xchg->lport,
+			return UNF_RETURN_ERROR);
+	UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, xchg->rport,
+			return UNF_RETURN_ERROR);
+
+	lport = xchg->lport;
+	rport = xchg->rport;
+	plogi_frame =
+	    &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->plogi;
+	login_parms = &plogi_frame->payload.parms;
+
+	unf_save_plogi_params(lport, rport, login_parms,
+			      ELS_PLOGI);
+
+	/* Update state: PLOGI_WAIT */
+	spin_lock_irqsave(&rport->rport_state_lock, flag);
+	rport->nport_id = xchg->sid;
+	unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI);
+	spin_unlock_irqrestore(&rport->rport_state_lock, flag);
+
+	/* Send PLOGI ACC to remote port */
+	ret = unf_send_plogi_acc(lport, rport, xchg);
+	if (ret != RETURN_OK) {
+		UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
+			  "[warn]LOGIN: Port(0x%x) send PLOGI ACC failed",
+			  lport->port_id);
+
+		/* NOTE: the exchange has already been freed (above) */
+		unf_rport_error_recovery(rport);
+		return ret;
+	}
+
+	UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO,
+		  "[info]LOGIN: Port(0x%x) send PLOGI ACC to Port(0x%x) succeed",
+		  lport->port_id, rport->nport_id);
+
+	return ret;
+}
+
+static int unf_plogi_async_handle(void *v_argc_in, void *v_argc_out)
+{
+	struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_argc_in;
+	unsigned int ret = RETURN_OK;
+
+	UNF_CHECK_VALID(0x2267, UNF_TRUE, xchg, return UNF_RETURN_ERROR);
+
+	ret = unf_plogi_handler_com_process(xchg);
+	unf_xchg_ref_dec(xchg, SFS_RESPONSE);
+
+	return (int)ret;
+}
+
unf_els_rjt_s *els_rjt = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = v_xchg; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + + UNF_CHECK_VALID(0x3503, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3504, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + xchg->cmnd_code = UNF_SET_ELS_RJT_TYPE(v_rjt_info->els_cmnd_code); + xchg->did = v_did; + xchg->sid = v_lport->nport_id; + xchg->oid = xchg->sid; + xchg->lport = v_lport; + xchg->rport = NULL; + xchg->disc_rport = NULL; + + xchg->pfn_callback = NULL; + xchg->pfn_ob_callback = NULL; + + unf_fill_package(&pkg, xchg, NULL); + + fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, xchg); + return UNF_RETURN_ERROR; + } + + els_rjt = &fc_entry->els_rjt; + memset(els_rjt, 0, sizeof(struct unf_els_rjt_s)); + unf_fill_rjt_pld(els_rjt, v_rjt_info->reason_code, + v_rjt_info->reason_explanation); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Send LS_RJT %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_did, ox_id, rx_id); + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static unsigned int unf_plogi_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg) +{ + struct unf_xchg_s *xchg = v_xchg; + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = NULL; + struct unf_plogi_pdisc_s *plogi_frame = NULL; + struct unf_lgn_parms_s *login_parms = NULL; + struct unf_rjt_info_s rjt_info = { 0 }; + unsigned long long wwpn = INVALID_VALUE64; + unsigned int ret = UNF_RETURN_ERROR; + int bbscn_enabled = UNF_FALSE; + int switch_2_thread = UNF_FALSE; + + UNF_CHECK_VALID(0x3474, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3475, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + /* 1. Maybe: PLOGI is sent by Name server */ + if ((v_sid < UNF_FC_FID_DOM_MGR) || + (v_lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT)) + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Receive PLOGI. Port(0x%x_0x%x)<---RPort(0x%x) with OX_ID(0x%x)", + v_lport->port_id, v_lport->nport_id, v_sid, + v_xchg->ox_id); + + UNF_SERVICE_COLLECT(v_lport->link_service_info, + UNF_SERVICE_ITEM_PLOGI); + + /* 2. State check: Offline */ + if (lport->en_states >= UNF_LPORT_ST_OFFLINE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) received PLOGI with state(0x%x)", + lport->port_id, lport->nport_id, lport->en_states); + + unf_cm_free_xchg(lport, xchg); + return UNF_RETURN_ERROR; + } + + /* + * 3. 
According to FC-LS 4.2.7.1:
+ * After RCVD PLOGI or send PLOGI ACC, need to terminate open EXCH
+ */
+ unf_cm_xchg_mgr_abort_io_by_id(lport, rport, v_sid, lport->nport_id, 0);
+
+ /* Get R_Port by WWPN */
+ plogi_frame =
+ &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->plogi;
+ login_parms = &plogi_frame->payload.parms;
+
+ UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id,
+ &plogi_frame->payload,
+ sizeof(struct unf_plogi_payload_s));
+
+ wwpn = (unsigned long long)
+ (((unsigned long long)
+ (login_parms->high_port_name) << 32) |
+ ((unsigned long long)login_parms->low_port_name));
+
+ /* 4. Get (new) R_Port (by wwpn) */
+ rport = unf_find_rport(lport, v_sid, wwpn);
+ rport = unf_get_safe_rport(lport, rport, UNF_RPORT_REUSE_ONLY, v_sid);
+ if (!rport) {
+ memset(&rjt_info, 0, sizeof(struct unf_rjt_info_s));
+ rjt_info.els_cmnd_code = ELS_PLOGI;
+ rjt_info.reason_code = UNF_LS_RJT_BUSY;
+ rjt_info.reason_explanation =
+ UNF_LS_RJT_INSUFFICIENT_RESOURCES;
+
+ /* R_Port is NULL: Send ELS RJT for PLOGI */
+ (void)unf_send_els_rjt_by_did(lport, xchg, v_sid, &rjt_info);
+
+ UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
+ "[warn]Port(0x%x) has no RPort and sends PLOGI reject",
+ lport->port_id);
+
+ /* NOTE: the exchange has already been freed (by the callee) */
+ return UNF_RETURN_ERROR;
+ }
+
+ /* 5. Cancel recovery timer work after RCVD PLOGI */
+ if (cancel_delayed_work(&rport->recovery_work))
+ atomic_dec(&rport->rport_ref_cnt);
+
+ /*
+ * 6. PLOGI parameters check
+ * Called by: (RCVD) PLOGI handler & callback function for RCVD PLOGI_ACC
+ */
+ ret = unf_check_plogi_params(lport, rport, login_parms);
+ if (ret != RETURN_OK) {
+ unf_cm_free_xchg(lport, xchg);
+ return UNF_RETURN_ERROR;
+ }
+
+ xchg->lport = v_lport;
+ xchg->rport = rport;
+ xchg->sid = v_sid;
+
+ /* 7. Check bbscn for context change */
+ bbscn_enabled = unf_check_bbscn_is_enabled(
+ (unsigned char)lport->low_level_func.lport_cfg_items.bb_scn,
+ (unsigned char)UNF_GET_BB_SC_N_FROM_PARAMS(login_parms));
+ if ((lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT) &&
+ (bbscn_enabled == UNF_TRUE)) {
+ switch_2_thread = UNF_TRUE;
+ lport->b_bbscn_support = UNF_TRUE;
+ }
+
+ /* 8. 
Process PLOGI Frame: switch to thread if necessary */ + if ((switch_2_thread == UNF_TRUE) && (lport->root_lport == lport)) + /* Wait for LR complete sync */ + ret = unf_irq_process_switch_2_thread(lport, xchg, + unf_plogi_async_handle); + else + ret = unf_plogi_handler_com_process(xchg); + + return ret; +} + +static void unf_obtain_tape_capacity(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + unsigned int tape_parm) +{ + unsigned int rec_support = 0; + unsigned int task_retry_support = 0; + unsigned int retry_support = 0; + + rec_support = tape_parm & UNF_FC4_FRAME_PARM_3_REC_SUPPORT; + task_retry_support = tape_parm & + UNF_FC4_FRAME_PARM_3_TASK_RETRY_ID_SUPPORT; + retry_support = tape_parm & UNF_FC4_FRAME_PARM_3_RETRY_SUPPORT; + + if ((v_lport->low_level_func.lport_cfg_items.tape_support) && + rec_support && task_retry_support && retry_support) { + v_rport->tape_support_needed = UNF_TRUE; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x_0x%x) FC_tape is needed for RPort(0x%x)", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id); + } + + if ((tape_parm & UNF_FC4_FRAME_PARM_3_CONF_ALLOW) && + (v_lport->low_level_func.lport_cfg_items.fcp_conf != UNF_FALSE)) { + v_rport->fcp_conf_needed = UNF_TRUE; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x_0x%x) FCP confirm is needed for RPort(0x%x)", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id); + } +} + +unsigned int unf_prli_handler_com_process(struct unf_xchg_s *v_xchg) +{ + struct unf_prli_prlo_s *prli = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned long flags = 0; + unsigned int uisid = 0; + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_xchg_s *xchg = NULL; + + xchg = v_xchg; + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, xchg, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, xchg->lport, + return UNF_RETURN_ERROR); + lport = xchg->lport; + uisid = v_xchg->sid; + + UNF_SERVICE_COLLECT(lport->link_service_info, UNF_SERVICE_ITEM_PRLI); + + /* 1. Get R_Port: for each R_Port from rport_busy_list */ + rport = unf_get_rport_by_nport_id(lport, uisid); + if (!rport) { + /* non session (R_Port) existence */ + (void)unf_send_logo_by_did(lport, uisid); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) received PRLI but no RPort SID(0x%x) OX_ID(0x%x)", + lport->port_id, lport->nport_id, uisid, + v_xchg->ox_id); + + unf_cm_free_xchg(lport, v_xchg); + return ret; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]LOGIN: Receive PRLI. Port(0x%x)<---RPort(0x%x) with S_ID(0x%x)", + lport->port_id, rport->nport_id, uisid); + + /* 2. Get PRLI info */ + prli = &v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->prli; + if ((uisid < UNF_FC_FID_DOM_MGR) || + (lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]LOGIN: Receive PRLI. Port(0x%x_0x%x)<---RPort(0x%x) parameter-3(0x%x) OX_ID(0x%x)", + lport->port_id, lport->nport_id, uisid, + prli->payload.parms[3], v_xchg->ox_id); + } + + UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, + &prli->payload, sizeof(struct unf_pril_payload_s)); + + spin_lock_irqsave(&rport->rport_state_lock, flags); + + /* 3. 
Increase R_Port ref_cnt */ + ret = unf_rport_ref_inc(rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) RPort(0x%x_0x%p) is removing and do nothing", + lport->port_id, rport->nport_id, rport); + + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + unf_cm_free_xchg(lport, v_xchg); + return RETURN_OK; + } + + /* 4. Cancel R_Port Open work */ + if (cancel_delayed_work(&rport->open_work)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x_0x%x) RPort(0x%x) cancel open work succeed", + lport->port_id, lport->nport_id, rport->nport_id); + + /* This is not the last counter */ + atomic_dec(&rport->rport_ref_cnt); + } + + /* 5. Check R_Port state */ + if ((rport->rp_state != UNF_RPORT_ST_PRLI_WAIT) && + (rport->rp_state != UNF_RPORT_ST_READY)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) RPort(0x%x) with state(0x%x) when received PRLI, send LOGO", + lport->port_id, lport->nport_id, + rport->nport_id, rport->rp_state); + + unf_rport_state_ma(rport, UNF_EVENT_RPORT_LOGO); // LOGO + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + /* NOTE: Start to send LOGO */ + unf_rport_enter_logo(lport, rport); + + unf_cm_free_xchg(lport, v_xchg); + unf_rport_ref_dec(rport); + + return ret; + } + + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + /* 6. Update R_Port options(INI/TGT/BOTH) */ + rport->options = prli->payload.parms[3] & + (UNF_FC4_FRAME_PARM_3_TGT | + UNF_FC4_FRAME_PARM_3_INI); + + unf_update_port_feature(rport->port_name, rport->options); + + /* for Confirm */ + rport->fcp_conf_needed = UNF_FALSE; + + unf_obtain_tape_capacity(lport, rport, prli->payload.parms[3]); + + if ((prli->payload.parms[3] & UNF_FC4_FRAME_PARM_3_CONF_ALLOW) && + (lport->low_level_func.lport_cfg_items.fcp_conf != UNF_FALSE)) { + rport->fcp_conf_needed = UNF_TRUE; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x_0x%x) FCP confirm is needed for RPort(0x%x)", + lport->port_id, lport->nport_id, rport->nport_id); + } + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x_0x%x) RPort(0x%x) parameter-3(0x%x) options(0x%x)", + lport->port_id, lport->nport_id, rport->nport_id, + prli->payload.parms[3], rport->options); + + /* 7. Send PRLI ACC */ + ret = unf_send_prli_acc(lport, rport, v_xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) RPort(0x%x) send PRLI ACC failed", + lport->port_id, lport->nport_id, rport->nport_id); + + /* NOTE: exchange has been freed inner(before) */ + unf_rport_error_recovery(rport); + } + + /* 8. 
Decrease R_Port ref_cnt */
+ unf_rport_ref_dec(rport);
+
+ return ret;
+}
+
+static int unf_prli_async_handle(void *v_argc_in, void *v_argc_out)
+{
+ struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_argc_in;
+ unsigned int ret = RETURN_OK;
+
+ UNF_CHECK_VALID(0x2267, UNF_TRUE, xchg, return UNF_RETURN_ERROR);
+
+ ret = unf_prli_handler_com_process(xchg);
+
+ unf_xchg_ref_dec(xchg, SFS_RESPONSE);
+
+ return (int)ret;
+}
+
+static unsigned int unf_prli_handler(struct unf_lport_s *v_lport,
+ unsigned int v_sid,
+ struct unf_xchg_s *v_xchg)
+{
+ unsigned int ret = UNF_RETURN_ERROR;
+ int switch_2_thread = UNF_FALSE;
+ struct unf_lport_s *lport = NULL;
+
+ UNF_CHECK_VALID(0x3476, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
+ UNF_CHECK_VALID(0x3477, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR);
+
+ v_xchg->sid = v_sid;
+ v_xchg->lport = v_lport;
+ lport = v_lport;
+
+ if ((v_lport->b_bbscn_support) &&
+ (v_lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT))
+ switch_2_thread = UNF_TRUE;
+
+ if ((switch_2_thread == UNF_TRUE) && (lport->root_lport == lport))
+ /* Wait for LR done sync */
+ ret = unf_irq_process_switch_2_thread(v_lport, v_xchg,
+ unf_prli_async_handle);
+ else
+ ret = unf_prli_handler_com_process(v_xchg);
+
+ return ret;
+}
+
+static void unf_save_rscn_port_id(
+ struct unf_rscn_mg_s *v_rscn_mg,
+ struct unf_rscn_port_id_page_s *v_rscn_port_id)
+{
+ struct unf_port_id_page_s *exit_port_id_page = NULL;
+ struct unf_port_id_page_s *new_port_id_page = NULL;
+ struct list_head *node = NULL;
+ struct list_head *next_node = NULL;
+ unsigned long flag = 0;
+ enum int_e repeat = UNF_FALSE;
+
+ UNF_CHECK_VALID(0x3478, UNF_TRUE, v_rscn_mg, return);
+ UNF_CHECK_VALID(0x3479, UNF_TRUE, v_rscn_port_id, return);
+
+ /* 1. Check whether the new RSCN Port_ID (RSCN_Page)
+ * is already within the RSCN_Mgr or not
+ */
+ spin_lock_irqsave(&v_rscn_mg->rscn_id_list_lock, flag);
+ if (list_empty(&v_rscn_mg->list_using_rscn_page)) {
+ repeat = UNF_FALSE;
+ } else {
+ /* Check repeat: for each existing RSCN page
+ * from the RSCN_Mgr page list
+ */
+ list_for_each_safe(node, next_node,
+ &v_rscn_mg->list_using_rscn_page) {
+ exit_port_id_page =
+ list_entry(node, struct unf_port_id_page_s,
+ list_node_rscn);
+ if ((exit_port_id_page->port_id_port ==
+ v_rscn_port_id->port_id_port) &&
+ (exit_port_id_page->port_id_area ==
+ v_rscn_port_id->port_id_area) &&
+ (exit_port_id_page->port_id_domain ==
+ v_rscn_port_id->port_id_domain)) {
+ repeat = UNF_TRUE;
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&v_rscn_mg->rscn_id_list_lock, flag);
+
+ UNF_CHECK_VALID(0x3480, UNF_TRUE, v_rscn_mg->pfn_unf_get_free_rscn_node,
+ return);
+
+ /* 2. Get & add a free RSCN node --->>> RSCN_Mgr */
+ if (repeat == UNF_FALSE) {
+ new_port_id_page =
+ v_rscn_mg->pfn_unf_get_free_rscn_node(v_rscn_mg);
+ if (!new_port_id_page) {
+ UNF_TRACE(UNF_EVTLOG_DRIVER_ERR,
+ UNF_LOG_LOGIN_ATT, UNF_ERR,
+ "[err]Get free RSCN node failed");
+
+ return;
+ }
+
+ new_port_id_page->uc_addr_format = v_rscn_port_id->addr_format;
+ new_port_id_page->uc_event_qualifier =
+ v_rscn_port_id->event_qualifier;
+ new_port_id_page->uc_reserved = v_rscn_port_id->reserved;
+ new_port_id_page->port_id_domain =
+ v_rscn_port_id->port_id_domain;
+ new_port_id_page->port_id_area = v_rscn_port_id->port_id_area;
+ new_port_id_page->port_id_port = v_rscn_port_id->port_id_port;
+
+ /* Add entry to list: using_rscn_page */
+ spin_lock_irqsave(&v_rscn_mg->rscn_id_list_lock, flag);
+ list_add_tail(&new_port_id_page->list_node_rscn,
+ &v_rscn_mg->list_using_rscn_page);
+ spin_unlock_irqrestore(&v_rscn_mg->rscn_id_list_lock, flag);
+ } else {
+ UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
+ "[info]Repeated RSCN node with domain(0x%x) area(0x%x) port(0x%x)",
+ v_rscn_port_id->port_id_domain,
+ v_rscn_port_id->port_id_area,
+ v_rscn_port_id->port_id_port);
+ }
+}
+
+static unsigned int unf_analysis_rscn_payload(struct unf_lport_s *v_lport,
+ struct unf_rscn_pld_s *v_rscn_pld)
+{
+#define UNF_OS_DISC_REDISC_TIME 10000
+
+ struct unf_rscn_port_id_page_s *rscn_port_id = NULL;
+ struct unf_disc_s *disc = NULL;
+ struct unf_rscn_mg_s *rscn_mgr = NULL;
+ unsigned int i = 0;
+ unsigned int pld_len = 0;
+ unsigned int port_id_page_cnt = 0;
+ unsigned int ret = RETURN_OK;
+ unsigned long flag = 0;
+ enum int_e need_disc_flag = UNF_FALSE;
+
+ UNF_CHECK_VALID(0x3481, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
+ UNF_CHECK_VALID(0x3482, UNF_TRUE, v_rscn_pld, return UNF_RETURN_ERROR);
+
+ /* This field is the length in bytes of the entire Payload,
+ * inclusive of word 0
+ */
+ pld_len = UNF_GET_RSCN_PLD_LEN(v_rscn_pld->cmnd);
+ pld_len -= sizeof(v_rscn_pld->cmnd);
+ port_id_page_cnt = pld_len / UNF_RSCN_PAGE_LEN;
+
+ /* The payload carries no more than 255 pages */
+ if (port_id_page_cnt > UNF_RSCN_PAGE_SUM) {
+ UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
+ "[err]Port(0x%x_0x%x) page num(0x%x) exceeds 255 in RSCN",
+ v_lport->port_id, v_lport->nport_id,
+ port_id_page_cnt);
+
+ return UNF_RETURN_ERROR;
+ }
+
+ /* L_Port-->Disc-->Rscn_Mgr */
+ disc = &v_lport->disc;
+ rscn_mgr = &disc->rscn_mgr;
+
+ /* For each ID from the RSCN pages: check whether DISC is needed */
+ while (i < port_id_page_cnt) {
+ rscn_port_id = &v_rscn_pld->port_id_page[i];
+ if (unf_lookup_lport_by_nport_id(v_lport, *(unsigned int *)rscn_port_id)) {
+ /* Prevent creating a session with an L_Port that has the same N_Port_ID */
+ UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT,
+ UNF_INFO,
+ "[info]Port(0x%x) found local N_Port_ID(0x%x) within RSCN payload",
+ ((struct unf_lport_s *)
+ (v_lport->root_lport))->nport_id,
+ *(unsigned int *)rscn_port_id);
+ } else {
+ UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT,
+ UNF_INFO,
+ "[info]Port(0x%x_0x%x) save RSCN N_Port_ID(0x%x)",
+ v_lport->port_id, v_lport->nport_id,
+ *(unsigned int *)rscn_port_id);
+
+ /* 1. New RSCN_Page ID found: save it to the RSCN_Mgr */
+ unf_save_rscn_port_id(rscn_mgr, rscn_port_id);
+ need_disc_flag = UNF_TRUE;
+ unf_report_io_dm_event(v_lport, ELS_RSCN,
+ *(unsigned int *)rscn_port_id);
+ }
+ i++;
+ }
+
+ if (need_disc_flag != UNF_TRUE) {
+ UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_NORMAL, UNF_MAJOR,
+ "[info]Port(0x%x) found all N_Port_IDs and does not need to disc",
+ ((struct unf_lport_s *)(v_lport->root_lport))->nport_id);
+
+ return RETURN_OK;
+ }
+
+ /* 2. Do/Start DISC: check & do the DISC (GID_PT) process */
+ if (!disc->unf_disc_temp.pfn_unf_disc_start) {
+ UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
+ "[warn]Port(0x%x_0x%x) DISC start function is NULL",
+ v_lport->port_id, v_lport->nport_id);
+
+ return UNF_RETURN_ERROR;
+ }
+
+ spin_lock_irqsave(&disc->rport_busy_pool_lock, flag);
+ if ((disc->en_states == UNF_DISC_ST_END) ||
+ ((jiffies - disc->last_disc_jiff) >
+ msecs_to_jiffies(UNF_OS_DISC_REDISC_TIME))) {
+ disc->disc_option = UNF_RSCN_DISC;
+ disc->last_disc_jiff = jiffies;
+ spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag);
+
+ ret = disc->unf_disc_temp.pfn_unf_disc_start(v_lport);
+ } else {
+ UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_ABNORMAL, UNF_INFO,
+ "[info]Port(0x%x_0x%x) DISC state(0x%x) with last time(%llu) and don't do DISC",
+ v_lport->port_id, v_lport->nport_id,
+ disc->en_states, disc->last_disc_jiff);
+
+ spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag);
+ }
+
+ return ret;
+}
+
+static unsigned int unf_rscn_handler(struct unf_lport_s *v_lport,
+ unsigned int v_sid,
+ struct unf_xchg_s *v_xchg)
+{
+ /*
+ * An RSCN ELS shall be sent to registered Nx_Ports
+ * when an event occurs that may have affected the state of
+ * one or more Nx_Ports, or the ULP state within the Nx_Port.
+ *
+ * The Payload of an RSCN Request includes a list
+ * containing the addresses of the affected Nx_Ports.
+ *
+ * Each affected Port_ID page contains the ID of the Nx_Port,
+ * Fabric Controller, E_Port, domain, or area for
+ * which the event was detected.
+ */
+ struct unf_rscn_pld_s *rscn_pld = NULL;
+ struct unf_rport_s *rport = NULL;
+ unsigned int ret = UNF_RETURN_ERROR;
+ unsigned int pld_len = 0;
+
+ UNF_REFERNCE_VAR(pld_len);
+ UNF_CHECK_VALID(0x3483, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
+ UNF_CHECK_VALID(0x3484, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR);
+
+ UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
+ "[info]Receive RSCN Port(0x%x_0x%x)<---RPort(0x%x) OX_ID(0x%x)",
+ v_lport->port_id, v_lport->nport_id, v_sid,
+ v_xchg->ox_id);
+
+ UNF_SERVICE_COLLECT(v_lport->link_service_info,
+ UNF_SERVICE_ITEM_RSCN);
+
+ /* 1. Get R_Port by S_ID */
+ rport = unf_get_rport_by_nport_id(v_lport, v_sid); // rport busy_list
+ if (!rport) {
+ rport = unf_rport_get_free_and_init(v_lport,
+ UNF_PORT_TYPE_FC, v_sid);
+ if (!rport) {
+ UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT,
+ UNF_WARN,
+ "[warn]Port(0x%x_0x%x) received RSCN but has no RPort(0x%x) with OX_ID(0x%x)",
+ v_lport->port_id, v_lport->nport_id,
+ v_sid, v_xchg->ox_id);
+
+ unf_cm_free_xchg(v_lport, v_xchg);
+ return UNF_RETURN_ERROR;
+ }
+
+ rport->nport_id = v_sid;
+ }
+
+ rscn_pld =
+ v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->rscn.rscn_pld;
+ UNF_CHECK_VALID(0x3485, UNF_TRUE, NULL != rscn_pld,
+ return UNF_RETURN_ERROR);
+ pld_len = UNF_GET_RSCN_PLD_LEN(rscn_pld->cmnd);
+ UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, rscn_pld, pld_len);
+
+ /* 2. 
NOTE: Analysis RSCN payload(save & disc if necessary) */ + ret = unf_analysis_rscn_payload(v_lport, rscn_pld); + if (ret != RETURN_OK) + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) analysis RSCN failed", + v_lport->port_id, v_lport->nport_id); + + /* 3. send rscn_acc after analysis payload */ + ret = unf_send_rscn_acc(v_lport, rport, v_xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) send RSCN response failed", + v_lport->port_id, v_lport->nport_id); + + return UNF_RETURN_ERROR; + } + + UNF_REFERNCE_VAR(pld_len); + return ret; +} + +static void unf_analysis_pdisc_pld(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_plogi_pdisc_s *v_pdisc) +{ + struct unf_lgn_parms_s *pdisc_params = NULL; + unsigned long long wwpn = INVALID_VALUE64; + unsigned long long wwnn = INVALID_VALUE64; + + UNF_CHECK_VALID(0x3486, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3487, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3488, UNF_TRUE, v_pdisc, return); + UNF_REFERNCE_VAR(v_lport); + + pdisc_params = &v_pdisc->payload.parms; + if (pdisc_params->co_parms.bb_receive_data_field_size > + UNF_MAX_FRAME_SIZE) + v_rport->max_frame_size = UNF_MAX_FRAME_SIZE; // 2112 + else + v_rport->max_frame_size = + pdisc_params->co_parms.bb_receive_data_field_size; + + wwnn = (unsigned long long) + (((unsigned long long) + (pdisc_params->high_node_name) << 32) | + ((unsigned long long)pdisc_params->low_node_name)); + wwpn = (unsigned long long) + (((unsigned long long)(pdisc_params->high_port_name) << 32) | + ((unsigned long long)pdisc_params->low_port_name)); + + v_rport->port_name = wwpn; + v_rport->node_name = wwnn; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) save PDISC parameters to Rport(0x%x) WWPN(0x%llx) WWNN(0x%llx)", + v_lport->port_id, v_rport->nport_id, + v_rport->port_name, v_rport->node_name); +} + +static unsigned int unf_send_pdisc_rjt(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg) +{ + unsigned int ret = UNF_RETURN_ERROR; + struct unf_rjt_info_s rjt_info; + + UNF_CHECK_VALID(0x3432, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3433, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3434, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + memset(&rjt_info, 0, sizeof(struct unf_rjt_info_s)); + rjt_info.els_cmnd_code = ELS_PDISC; + rjt_info.reason_code = UNF_LS_RJT_LOGICAL_ERROR; + rjt_info.reason_explanation = UNF_LS_RJT_NO_ADDITIONAL_INFO; + + ret = unf_send_els_rjt_by_rport(v_lport, v_xchg, v_rport, &rjt_info); + + return ret; +} + +static unsigned int unf_pdisc_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg) +{ + struct unf_plogi_pdisc_s *pdisc = NULL; + struct unf_rport_s *rport = NULL; + unsigned long flags = 0; + unsigned int ret = RETURN_OK; + unsigned long long wwpn = 0; + + UNF_CHECK_VALID(0x3489, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3490, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Receive PDISC. 
Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", + v_lport->port_id, v_sid, v_xchg->ox_id); + + UNF_SERVICE_COLLECT(v_lport->link_service_info, UNF_SERVICE_ITEM_PDISC); + pdisc = &v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->pdisc; + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, + &pdisc->payload, + sizeof(struct unf_plogi_payload_s)); + wwpn = (unsigned long long) + (((unsigned long long) + (pdisc->payload.parms.high_port_name) << 32) | + ((unsigned long long)pdisc->payload.parms.low_port_name)); + + rport = unf_find_rport(v_lport, v_sid, wwpn); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) can't find RPort by NPort ID(0x%x). Free exchange and send LOGO", + v_lport->port_id, v_sid); + + unf_cm_free_xchg(v_lport, v_xchg); + (void)unf_send_logo_by_did(v_lport, v_sid); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MINOR, + "[info]Port(0x%x) get exist RPort(0x%x) when receive PDISC with S_Id(0x%x)", + v_lport->port_id, rport->nport_id, v_sid); + + if (v_sid >= UNF_FC_FID_DOM_MGR) + return unf_send_pdisc_rjt(v_lport, rport, v_xchg); + + unf_analysis_pdisc_pld(v_lport, rport, pdisc); + + /* State: READY */ + spin_lock_irqsave(&rport->rport_state_lock, flags); + if (rport->rp_state == UNF_RPORT_ST_READY) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port(0x%x) find RPort(0x%x) state is READY when receiving PDISC", + v_lport->port_id, v_sid); + + spin_unlock_irqrestore(&rport->rport_state_lock, + flags); + + ret = unf_send_pdisc_acc(v_lport, rport, v_xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) handle PDISC failed", + v_lport->port_id); + + return ret; + } + + /* Report Down/Up event to scsi */ + unf_update_lport_state_by_linkup_event(v_lport, rport, + rport->options); + } + /* State: Closing */ + else if ((rport->rp_state == UNF_RPORT_ST_CLOSING) && + (rport->session)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) find RPort(0x%x) state is 0x%x when receiving PDISC", + v_lport->port_id, v_sid, rport->rp_state); + + spin_unlock_irqrestore(&rport->rport_state_lock, + flags); + + unf_cm_free_xchg(v_lport, v_xchg); + (void)unf_send_logo_by_did(v_lport, v_sid); + } + /* State: PRLI_WAIT */ + else if (rport->rp_state == UNF_RPORT_ST_PRLI_WAIT) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port(0x%x) find RPort(0x%x) state is 0x%x when receiving PDISC", + v_lport->port_id, v_sid, rport->rp_state); + + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + ret = unf_send_pdisc_acc(v_lport, rport, v_xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) handle PDISC failed", + v_lport->port_id); + + return ret; + } + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) find RPort(0x%x) state is 0x%x when receiving PDISC, send LOGO", + v_lport->port_id, v_sid, rport->rp_state); + + unf_rport_state_ma(rport, UNF_EVENT_RPORT_LOGO); + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + unf_rport_enter_logo(v_lport, rport); + unf_cm_free_xchg(v_lport, v_xchg); + } + } + + return ret; +} + +static void unf_analysis_adisc_pld(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_adisc_payload_s *v_adisc_pld) +{ + unsigned long long wwpn = INVALID_VALUE64; + unsigned long long wwnn = INVALID_VALUE64; + + 
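+ /* The ADISC payload carries the same node/port name pair as PLOGI:
+ * each 64-bit WWN arrives split into two 32-bit words, i.e.
+ *     wwpn = ((u64)high_port_name << 32) | (u64)low_port_name;
+ * Rebuild both names below and cache them in the R_Port so the
+ * revalidated session keeps the login identity.
+ */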
UNF_CHECK_VALID(0x3491, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3492, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3493, UNF_TRUE, v_adisc_pld, return); + UNF_REFERNCE_VAR(v_lport); + + wwnn = (unsigned long long) + (((unsigned long long)(v_adisc_pld->high_node_name) << 32) | + ((unsigned long long)v_adisc_pld->low_node_name)); + wwpn = (unsigned long long) + (((unsigned long long)(v_adisc_pld->high_port_name) << 32) | + ((unsigned long long)v_adisc_pld->low_port_name)); + + v_rport->port_name = wwpn; + v_rport->node_name = wwnn; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) save ADISC parameters to RPort(0x%x), WWPN(0x%llx) WWNN(0x%llx) NPort ID(0x%x)", + v_lport->port_id, v_rport->nport_id, + v_rport->port_name, v_rport->node_name, + v_adisc_pld->nport_id); +} + +static unsigned int unf_adisc_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg) +{ + struct unf_rport_s *rport = NULL; + struct unf_adisc_payload_s *adisc_pld = NULL; + unsigned long flags = 0; + unsigned long long wwpn = 0; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3494, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3495, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Receive ADISC. Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", + v_lport->port_id, v_sid, v_xchg->ox_id); + + UNF_SERVICE_COLLECT(v_lport->link_service_info, + UNF_SERVICE_ITEM_ADISC); + adisc_pld = &v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->adisc.adisc_payl; + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, adisc_pld, + sizeof(struct unf_adisc_payload_s)); + wwpn = (unsigned long long) + (((unsigned long long)(adisc_pld->high_port_name) << 32) | + ((unsigned long long)adisc_pld->low_port_name)); + + rport = unf_find_rport(v_lport, v_sid, wwpn); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) can't find RPort by NPort ID(0x%x). 
Free exchange and send LOGO", + v_lport->port_id, v_sid); + + unf_cm_free_xchg(v_lport, v_xchg); + (void)unf_send_logo_by_did(v_lport, v_sid); + + return ret; + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MINOR, + "[info]Port(0x%x) get exist RPort(0x%x) when receive ADISC with S_ID(0x%x)", + v_lport->port_id, rport->nport_id, v_sid); + + unf_analysis_adisc_pld(v_lport, rport, adisc_pld); + + /* State: READY */ + spin_lock_irqsave(&rport->rport_state_lock, flags); + if (rport->rp_state == UNF_RPORT_ST_READY) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port(0x%x) find RPort(0x%x) state is READY when receiving ADISC", + v_lport->port_id, v_sid); + + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + /* Return ACC directly */ + ret = unf_send_adisc_acc(v_lport, rport, v_xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) send ADISC ACC failed", + v_lport->port_id); + + return ret; + } + + /* Report Down/Up event to SCSI */ + unf_update_lport_state_by_linkup_event(v_lport, rport, + rport->options); + } + /* State: Closing */ + else if ((rport->rp_state == UNF_RPORT_ST_CLOSING) && + (rport->session)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) find RPort(0x%x) state is 0x%x when receiving ADISC", + v_lport->port_id, v_sid, rport->rp_state); + + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + rport = unf_get_safe_rport(v_lport, rport, + UNF_RPORT_REUSE_RECOVER, + rport->nport_id); + if (rport) { + spin_lock_irqsave(&rport->rport_state_lock, flags); + rport->nport_id = v_sid; + spin_unlock_irqrestore(&rport->rport_state_lock, + flags); + + ret = unf_send_adisc_acc(v_lport, rport, v_xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) send ADISC ACC failed", + v_lport->port_id); + + return ret; + } + + unf_update_lport_state_by_linkup_event(v_lport, rport, + rport->options); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) can't find RPort by NPort_ID(0x%x). 
Free exchange and send LOGO", + v_lport->port_id, v_sid); + + unf_cm_free_xchg(v_lport, v_xchg); + (void)unf_send_logo_by_did(v_lport, v_sid); + } + } + /* State: PRLI_WAIT */ + else if (rport->rp_state == UNF_RPORT_ST_PRLI_WAIT) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) find RPort(0x%x) state is 0x%x when receiving ADISC", + v_lport->port_id, v_sid, rport->rp_state); + + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + ret = unf_send_adisc_acc(v_lport, rport, v_xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) send ADISC ACC failed", + v_lport->port_id); + + return ret; + } + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) find RPort(0x%x) state is 0x%x when receiving ADISC, send LOGO", + v_lport->port_id, v_sid, rport->rp_state); + + unf_rport_state_ma(rport, UNF_EVENT_RPORT_LOGO); + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + unf_rport_enter_logo(v_lport, rport); + unf_cm_free_xchg(v_lport, v_xchg); + } + + return ret; +} + +static unsigned int unf_rec_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg) +{ + struct unf_rport_s *rport = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3496, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3497, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + UNF_REFERNCE_VAR(v_sid); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Port(0x%x) receive REC", v_lport->port_id); + + /* Send rec acc */ + ret = unf_send_rec_acc(v_lport, rport, v_xchg); // discard directly + + return ret; +} + +static unsigned int unf_rrq_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg) +{ + struct unf_rport_s *rport = NULL; + struct unf_rrq_s *rrq = NULL; + struct unf_xchg_s *xchg_reused = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + unsigned int sid = 0; + unsigned long flags = 0; + struct unf_rjt_info_s rjt_info = { 0 }; + struct unf_xchg_hot_pool_s *hot_pool = NULL; + + UNF_CHECK_VALID(0x3498, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3499, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + UNF_REFERNCE_VAR(rx_id); + + UNF_SERVICE_COLLECT(v_lport->link_service_info, UNF_SERVICE_ITEM_RRQ); + rrq = &v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->rrq; + ox_id = (unsigned short)(rrq->oxid_rxid >> 16); + rx_id = (unsigned short)(rrq->oxid_rxid); + sid = rrq->sid & UNF_NPORTID_MASK; + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_KEVENT, + "[warn]Receive RRQ. 
Port(0x%x)<---RPort(0x%x) sfsXchg(0x%p) OX_ID(0x%x,0x%x) RX_ID(0x%x)", + v_lport->port_id, v_sid, v_xchg, + ox_id, v_xchg->ox_id, rx_id); + + /* Get R_Port */ + rport = unf_get_rport_by_nport_id(v_lport, v_sid); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) receive RRQ but has no RPort(0x%x)", + v_lport->port_id, v_sid); + + /* NOTE: send LOGO */ + ret = unf_send_logo_by_did(v_lport, sid); + + unf_cm_free_xchg(v_lport, v_xchg); + return ret; + } + + /* Get Target (Abort I/O) exchange context */ + /* UNF_FindXchgByOxId */ + xchg_reused = unf_cm_lookup_xchg_by_id(v_lport, ox_id, sid); + if (!xchg_reused) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) cannot find exchange with OX_ID(0x%x) RX_ID(0x%x) S_ID(0x%x)", + v_lport->port_id, ox_id, rx_id, sid); + + rjt_info.els_cmnd_code = ELS_RRQ; + rjt_info.reason_code = FCXLS_BA_RJT_LOGICAL_ERROR | + FCXLS_LS_RJT_INVALID_OXID_RXID; + + /* NOTE: send ELS RJT */ + if (unf_send_els_rjt_by_rport(v_lport, v_xchg, + rport, &rjt_info) != + RETURN_OK) { + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + return RETURN_OK; + } + + hot_pool = xchg_reused->hot_pool; + if (unlikely(!hot_pool)) { + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "Port(0x%x) OxId(0x%x) Rxid(0x%x) Sid(0x%x) Hot Pool is NULL.", + v_lport->port_id, ox_id, rx_id, sid); + + return ret; + } + + spin_lock_irqsave(&hot_pool->xchg_hot_pool_lock, flags); + xchg_reused->ox_id = INVALID_VALUE16; + xchg_reused->rx_id = INVALID_VALUE16; + spin_unlock_irqrestore(&hot_pool->xchg_hot_pool_lock, flags); + + /* NOTE: release I/O exchange context */ + unf_xchg_ref_dec(xchg_reused, SFS_RESPONSE); + + /* Send RRQ ACC */ + ret = unf_send_rrq_acc(v_lport, rport, v_xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) can not send RRQ rsp. 
Xchg(0x%p) Ioxchg(0x%p) OX_RX_ID(0x%x 0x%x) S_ID(0x%x)", + v_lport->port_id, v_xchg, + xchg_reused, ox_id, rx_id, sid); + + unf_cm_free_xchg(v_lport, v_xchg); + return ret; + } + + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static unsigned int unf_send_els_rjt_by_rport(struct unf_lport_s *v_lport, + struct unf_xchg_s *v_xchg, + struct unf_rport_s *v_rport, + struct unf_rjt_info_s *v_rjt_info) +{ + struct unf_els_rjt_s *els_rjt = NULL; + union unf_sfs_u *fc_entry = NULL; + struct unf_xchg_s *xchg = v_xchg; + struct unf_frame_pkg_s pkg = { 0 }; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + + UNF_CHECK_VALID(0x3500, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3501, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3502, UNF_TRUE, v_rport, return UNF_RETURN_ERROR); + + xchg->cmnd_code = UNF_SET_ELS_RJT_TYPE(v_rjt_info->els_cmnd_code); + xchg->did = v_rport->nport_id; + xchg->sid = v_lport->nport_id; + xchg->oid = xchg->sid; + xchg->lport = v_lport; + xchg->rport = v_rport; + xchg->disc_rport = NULL; + + xchg->pfn_callback = NULL; + xchg->pfn_ob_callback = NULL; + + unf_fill_package(&pkg, xchg, v_rport); + + fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, xchg); + return UNF_RETURN_ERROR; + } + + els_rjt = &fc_entry->els_rjt; + memset(els_rjt, 0, sizeof(struct unf_els_rjt_s)); + unf_fill_rjt_pld(els_rjt, v_rjt_info->reason_code, + v_rjt_info->reason_explanation); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + + ret = unf_els_cmnd_send(v_lport, &pkg, xchg); + if (ret != RETURN_OK) + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: Send LS_RJT for 0x%x %s. Port(0x%x)--->rport(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", + v_rjt_info->els_cmnd_code, + (ret != RETURN_OK) ? "failed" : "succeed", + v_lport->port_id, v_rport->nport_id, ox_id, rx_id); + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + + return ret; +} + +static unsigned int unf_els_cmnd_default_handler(struct unf_lport_s *v_lport, + struct unf_xchg_s *v_xchg, + unsigned int v_sid, + unsigned int v_els_cmnd_code) +{ +#define ELS_LCB 0X81 +#define ELS_RDP 0X18 + + struct unf_rport_s *rport = NULL; + struct unf_rjt_info_s rjt_info = { 0 }; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3505, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3506, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + if ((v_els_cmnd_code != ELS_LCB) && (v_els_cmnd_code != ELS_RDP)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_ABNORMAL, UNF_KEVENT, + "[info]Receive Unknown ELS command(0x%x). 
Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", + v_els_cmnd_code, v_lport->port_id, v_sid, + v_xchg->ox_id); + } + + memset(&rjt_info, 0, sizeof(struct unf_rjt_info_s)); + rjt_info.els_cmnd_code = v_els_cmnd_code; + rjt_info.reason_code = UNF_LS_RJT_NOT_SUPPORTED; + + rport = unf_get_rport_by_nport_id(v_lport, v_sid); + if (rport) + ret = unf_send_els_rjt_by_rport(v_lport, v_xchg, rport, + &rjt_info); + else + ret = unf_send_els_rjt_by_did(v_lport, v_xchg, v_sid, + &rjt_info); + + return ret; +} + +static struct unf_xchg_s *unf_alloc_xchg_for_rcv_cmnd( + struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_pkg) +{ + struct unf_xchg_s *xchg = NULL; + unsigned long flags = 0; + unsigned int i = 0; + unsigned int offset = 0; + unsigned char *cmnd_pld = NULL; + unsigned int first_dword = 0; + unsigned int alloc_time = 0; + + UNF_CHECK_VALID(0x3508, UNF_TRUE, v_lport, return NULL); + UNF_CHECK_VALID(0x3509, UNF_TRUE, v_pkg, return NULL); + + if (!v_pkg->xchg_contex) { + xchg = unf_cm_get_free_xchg(v_lport, UNF_XCHG_TYPE_SFS); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[warn]Port(0x%x) get new exchange failed", + v_lport->port_id); + + return NULL; + } + + offset = (xchg->fcp_sfs_union.sfs_entry.cur_offset); + cmnd_pld = (unsigned char *)xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->rscn.rscn_pld; + first_dword = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->sfs_common.frame_head.rctl_did; + + if ((cmnd_pld) || (first_dword != 0) || (offset != 0)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) exchange(0x%p) abnormal, maybe data overrun, start(%llu) command(0x%x)", + v_lport->port_id, xchg, + xchg->alloc_jif, v_pkg->cmnd); + + UNF_PRINT_SFS(UNF_INFO, v_lport->port_id, + xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr, + sizeof(union unf_sfs_u)); + } + + memset(xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr, 0, + sizeof(union unf_sfs_u)); + + v_pkg->xchg_contex = (void *)xchg; + + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + xchg->fcp_sfs_union.sfs_entry.cur_offset = 0; + alloc_time = xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]; + for (i = 0; i < PKG_MAX_PRIVATE_DATA_SIZE; i++) + xchg->private[i] = v_pkg->private[i]; + + xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME] = alloc_time; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + } else { + xchg = (struct unf_xchg_s *)v_pkg->xchg_contex; + } + + if (!xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) { + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + + return NULL; + } + + return xchg; +} + +static unsigned char *unf_calc_big_cmnd_pld_buffer(struct unf_xchg_s *v_xchg, + unsigned int v_cmnd_code) +{ + unsigned char *cmnd_pld = NULL; + void *buf = NULL; + unsigned char *dest = NULL; + + UNF_CHECK_VALID(0x3510, UNF_TRUE, v_xchg, return NULL); + + if (v_cmnd_code == ELS_RSCN) + cmnd_pld = (unsigned char *)v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->rscn.rscn_pld; + else + cmnd_pld = (unsigned char *)v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo.echo_pld; + + if (!cmnd_pld) { + buf = unf_get_one_big_sfs_buf(v_xchg); + if (!buf) + return NULL; + + if (v_cmnd_code == ELS_RSCN) { + memset(buf, 0, sizeof(struct unf_rscn_pld_s)); + v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->rscn.rscn_pld = buf; + } else { + memset(buf, 0, sizeof(struct unf_echo_payload_s)); + v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo.echo_pld = buf; + } + + dest = (unsigned char *)buf; + } else { + dest = (unsigned char *) + (cmnd_pld + 
v_xchg->fcp_sfs_union.sfs_entry.cur_offset);
+ }
+
+ return dest;
+}
+
+static unsigned char *unf_calc_other_pld_buffer(struct unf_xchg_s *v_xchg)
+{
+ unsigned char *dest = NULL;
+ unsigned int offset = 0;
+
+ UNF_CHECK_VALID(0x3511, UNF_TRUE, v_xchg, return NULL);
+
+ offset = (sizeof(struct unf_fchead_s)) +
+ (v_xchg->fcp_sfs_union.sfs_entry.cur_offset);
+ dest = (unsigned char *)
+ ((unsigned char *)
+ (v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) +
+ offset);
+
+ return dest;
+}
+
+static struct unf_xchg_s *unf_mv_data_2_xchg(struct unf_lport_s *v_lport,
+ struct unf_frame_pkg_s *v_pkg)
+{
+ struct unf_xchg_s *xchg = NULL;
+ unsigned char *dest = NULL;
+ unsigned int length = 0;
+ unsigned long flags = 0;
+
+ UNF_CHECK_VALID(0x3512, UNF_TRUE, v_lport, return NULL);
+ UNF_CHECK_VALID(0x3513, UNF_TRUE, v_pkg, return NULL);
+
+ xchg = unf_alloc_xchg_for_rcv_cmnd(v_lport, v_pkg);
+ if (!xchg)
+ return NULL;
+
+ spin_lock_irqsave(&xchg->xchg_state_lock, flags);
+
+ memcpy(&xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->sfs_common.frame_head,
+ &v_pkg->frame_head,
+ sizeof(v_pkg->frame_head));
+
+ if ((v_pkg->cmnd == ELS_RSCN) || (v_pkg->cmnd == ELS_ECHO))
+ dest = unf_calc_big_cmnd_pld_buffer(xchg, v_pkg->cmnd);
+ else
+ dest = unf_calc_other_pld_buffer(xchg);
+
+ if (!dest) {
+ spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
+ unf_cm_free_xchg((void *)v_lport, (void *)xchg);
+
+ return NULL;
+ }
+
+ if (((xchg->fcp_sfs_union.sfs_entry.cur_offset +
+ v_pkg->unf_cmnd_pload_bl.length) >
+ (unsigned int)sizeof(union unf_sfs_u)) &&
+ (v_pkg->cmnd != ELS_RSCN) &&
+ (v_pkg->cmnd != ELS_ECHO)) {
+ UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
+ "[warn]Port(0x%x) exchange(0x%p) command(0x%x,0x%x) copy payload overrun(0x%x:0x%x:0x%x)",
+ v_lport->port_id, xchg, v_pkg->cmnd,
+ xchg->hot_pool_tag,
+ xchg->fcp_sfs_union.sfs_entry.cur_offset,
+ v_pkg->unf_cmnd_pload_bl.length,
+ (unsigned int)sizeof(union unf_sfs_u));
+
+ spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
+ unf_cm_free_xchg((void *)v_lport, (void *)xchg);
+
+ return NULL;
+ }
+
+ length = v_pkg->unf_cmnd_pload_bl.length;
+ if (length > 0)
+ memcpy(dest, v_pkg->unf_cmnd_pload_bl.buffer_ptr, length);
+
+ xchg->fcp_sfs_union.sfs_entry.cur_offset += length;
+ spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
+
+ return xchg;
+}
+
+static unsigned int unf_logo_handler(struct unf_lport_s *v_lport,
+ unsigned int v_sid,
+ struct unf_xchg_s *v_xchg)
+{
+ struct unf_rport_s *rport = NULL;
+ struct unf_rport_s *logo_rport = NULL;
+ struct unf_logo_s *logo = NULL;
+ unsigned int ret = UNF_RETURN_ERROR;
+ unsigned int nport_id = 0;
+ struct unf_rjt_info_s rjt_info = { 0 };
+
+ UNF_REFERNCE_VAR(logo);
+ UNF_CHECK_VALID(0x3514, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
+ UNF_CHECK_VALID(0x3515, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR);
+
+ UNF_SERVICE_COLLECT(v_lport->link_service_info, UNF_SERVICE_ITEM_LOGO);
+ logo = &v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->logo;
+ nport_id = logo->payload.nport_id & UNF_NPORTID_MASK;
+
+ if (v_sid < UNF_FC_FID_DOM_MGR) {
+ /* R_Port is not a fabric port */
+ UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT,
+ UNF_KEVENT,
+ "[info]LOGIN: Receive LOGO. Port(0x%x)<---RPort(0x%x) NPort_ID(0x%x) OXID(0x%x)",
+ v_lport->port_id, v_sid, nport_id, v_xchg->ox_id);
+ }
+
+ UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, &logo->payload,
+ sizeof(struct unf_logo_payload_s));
+
+ /*
+ * 1. S_ID unequal to NPort_ID:
+ * immediately link down the R_Port found by NPort_ID
+ */
+ if (nport_id != v_sid) {
+ logo_rport = unf_get_rport_by_nport_id(v_lport, nport_id);
+ if (logo_rport)
+ unf_rport_immediate_linkdown(v_lport, logo_rport);
+ }
+
+ /* 2. Get R_Port by S_ID (frame header) */
+ rport = unf_get_rport_by_nport_id(v_lport, v_sid);
+ rport = unf_get_safe_rport(v_lport, rport, UNF_RPORT_REUSE_INIT,
+ v_sid); // INIT
+ if (!rport) {
+ memset(&rjt_info, 0, sizeof(struct unf_rjt_info_s));
+ rjt_info.els_cmnd_code = ELS_LOGO;
+ rjt_info.reason_code = UNF_LS_RJT_LOGICAL_ERROR;
+ rjt_info.reason_explanation = UNF_LS_RJT_NO_ADDITIONAL_INFO;
+ ret = unf_send_els_rjt_by_did(v_lport, v_xchg, v_sid,
+ &rjt_info);
+
+ UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT,
+ UNF_WARN,
+ "[warn]Port(0x%x) receive LOGO but has no RPort(0x%x)",
+ v_lport->port_id, v_sid);
+
+ return ret;
+ }
+
+ /*
+ * 3. I/O resource release: set ABORT tag
+ *
+ * Called by: R_Port remove; RCVD LOGO; RCVD PLOGI; send PLOGI ACC
+ */
+ unf_cm_xchg_mgr_abort_io_by_id(v_lport, rport, v_sid, v_lport->nport_id,
+ INI_IO_STATE_LOGO);
+
+ /* 4. Send LOGO ACC */
+ ret = unf_send_logo_acc(v_lport, rport, v_xchg);
+ if (ret != RETURN_OK)
+ UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT,
+ UNF_WARN,
+ "[warn]Port(0x%x) send LOGO ACC failed",
+ v_lport->port_id);
+
+ /*
+ * 5. Do the same operations as for RCVD LOGO/PRLO & send LOGO:
+ * retry (LOGIN or LOGO) or link down immediately
+ */
+ unf_process_rport_after_logo(v_lport, rport);
+
+ return ret;
+}
+
+static unsigned int unf_prlo_handler(struct unf_lport_s *v_lport,
+ unsigned int v_sid,
+ struct unf_xchg_s *v_xchg)
+{
+ struct unf_rport_s *rport = NULL;
+ struct unf_prli_prlo_s *prlo = NULL;
+ unsigned int ret = UNF_RETURN_ERROR;
+
+ UNF_REFERNCE_VAR(prlo);
+ UNF_CHECK_VALID(0x3516, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
+ UNF_CHECK_VALID(0x3517, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR);
+
+ UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
+ "[info]LOGIN: Receive PRLO. 
Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", + v_lport->port_id, v_sid, v_xchg->ox_id); + + UNF_SERVICE_COLLECT(v_lport->link_service_info, UNF_SERVICE_ITEM_LOGO); + + /* Get (new) R_Port */ + rport = unf_get_rport_by_nport_id(v_lport, v_sid); + rport = unf_get_safe_rport(v_lport, rport, + UNF_RPORT_REUSE_INIT, v_sid); /* INIT */ + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) receive PRLO but has no RPort", + v_lport->port_id); + + /* Discard directly */ + unf_cm_free_xchg(v_lport, v_xchg); + return ret; + } + + prlo = &v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->prlo; + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, &prlo->payload, + sizeof(struct unf_pril_payload_s)); + + /* Send PRLO ACC to remote */ + ret = unf_send_prlo_acc(v_lport, rport, v_xchg); + if (ret != RETURN_OK) + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) send PRLO ACC failed", + v_lport->port_id); + + /* Enter Enhanced action after LOGO (retry LOGIN or LOGO) */ + unf_process_rport_after_logo(v_lport, rport); + + UNF_REFERNCE_VAR(prlo); + return ret; +} + +static void unf_fill_echo_acc_pld(struct unf_echo_s *v_echo_acc) +{ + struct unf_echo_payload_s *echo_acc_pld = NULL; + + UNF_CHECK_VALID(0x3518, UNF_TRUE, v_echo_acc, return); + + echo_acc_pld = v_echo_acc->echo_pld; + UNF_CHECK_VALID(0x3519, UNF_TRUE, echo_acc_pld, return); + + echo_acc_pld->cmnd = UNF_ELS_CMND_ACC; +} + +static void unf_echo_acc_callback(struct unf_xchg_s *v_xchg) +{ + struct unf_lport_s *lport; + + UNF_CHECK_VALID(0x3517, UNF_TRUE, v_xchg, return); + + lport = v_xchg->lport; + + UNF_CHECK_VALID(0x3517, UNF_TRUE, lport, return); + if (v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo_acc.phy_echo_addr) { + pci_unmap_single( + lport->low_level_func.dev, + v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo_acc.phy_echo_addr, + UNF_ECHO_PAYLOAD_LEN, + DMA_BIDIRECTIONAL); + v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo_acc.phy_echo_addr = 0; + } +} + +static unsigned int unf_send_echo_acc(struct unf_lport_s *v_lport, + unsigned int v_did, + struct unf_xchg_s *v_xchg) +{ + struct unf_echo_s *echo_acc = NULL; + union unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + struct unf_frame_pkg_s pkg; + dma_addr_t phy_echo_acc_addr; + struct unf_rjt_info_s rjt_info = { 0 }; + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + + UNF_CHECK_VALID(0x3520, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3521, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + v_xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_ECHO); + v_xchg->did = v_did; + v_xchg->sid = v_lport->nport_id; + v_xchg->oid = v_xchg->sid; + v_xchg->lport = v_lport; + + v_xchg->pfn_callback = NULL; + v_xchg->pfn_ob_callback = unf_echo_acc_callback; + + unf_fill_package(&pkg, v_xchg, v_xchg->rport); + + fc_entry = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, v_xchg->hot_pool_tag); + + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + + echo_acc = &fc_entry->echo_acc; + unf_fill_echo_acc_pld(echo_acc); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + phy_echo_acc_addr = pci_map_single(v_lport->low_level_func.dev, + echo_acc->echo_pld, + UNF_ECHO_PAYLOAD_LEN, + 
DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(v_lport->low_level_func.dev, + phy_echo_acc_addr)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) pci map err", + v_lport->port_id); + unf_cm_free_xchg(v_lport, v_xchg); + return UNF_RETURN_ERROR; + } + echo_acc->phy_echo_addr = phy_echo_acc_addr; + + ret = unf_els_cmnd_send(v_lport, &pkg, v_xchg); + if (ret != RETURN_OK) { + pci_unmap_single(v_lport->low_level_func.dev, + phy_echo_acc_addr, + UNF_ECHO_PAYLOAD_LEN, + DMA_BIDIRECTIONAL); + echo_acc->phy_echo_addr = 0; + if (ret == UNF_RETURN_NOT_SUPPORT) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_KEVENT, + "[info]Port(0x%x) send ECHO reject to RPort(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", + v_lport->port_id, v_did, ox_id, rx_id); + + rjt_info.els_cmnd_code = ELS_ECHO; + rjt_info.reason_code = UNF_LS_RJT_NOT_SUPPORTED; + unf_send_els_rjt_by_rport(v_lport, v_xchg, + v_xchg->rport, + &rjt_info); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) send ECHO ACC to RPort(0x%x) with OX_ID(0x%x) RX_ID(0x%x) failed", + v_lport->port_id, v_did, ox_id, rx_id); + + unf_cm_free_xchg((void *)v_lport, (void *)v_xchg); + } + } + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static unsigned int unf_echo_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg) +{ + struct unf_echo_payload_s *echo_pld = NULL; + struct unf_rport_s *rport = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int data_len = 0; + + UNF_CHECK_VALID(0x3522, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3523, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + data_len = v_xchg->fcp_sfs_union.sfs_entry.cur_offset; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Receive ECHO. 
Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x))", + v_lport->port_id, v_sid, v_xchg->ox_id); + + UNF_SERVICE_COLLECT(v_lport->link_service_info, UNF_SERVICE_ITEM_ECHO); + echo_pld = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo.echo_pld; + UNF_PRINT_SFS_LIMIT(UNF_INFO, v_lport->port_id, echo_pld, data_len); + rport = unf_get_rport_by_nport_id(v_lport, v_sid); + v_xchg->rport = rport; + + ret = unf_send_echo_acc(v_lport, v_sid, v_xchg); + if (ret != RETURN_OK) + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) send ECHO ACC failed", + v_lport->port_id); + + UNF_REFERNCE_VAR(echo_pld); + UNF_REFERNCE_VAR(data_len); + return ret; +} + +static unsigned int unf_check_els_cmnd_valid(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_fra_pkg, + struct unf_xchg_s *v_xchg) +{ + struct unf_lport_s *lport = v_lport; + struct unf_frame_pkg_s *ppkg = v_fra_pkg; + struct unf_xchg_s *xchg = v_xchg; + struct unf_rjt_info_s rjt_info = { 0 }; + struct unf_lport_s *vport = NULL; + unsigned int sid = 0; + unsigned int did = 0; + + sid = (ppkg->frame_head.csctl_sid) & UNF_NPORTID_MASK; + did = (ppkg->frame_head.rctl_did) & UNF_NPORTID_MASK; + + memset(&rjt_info, 0, sizeof(struct unf_rjt_info_s)); + rjt_info.reason_code = UNF_LS_RJT_NOT_SUPPORTED; + + if ((ppkg->cmnd == ELS_FLOGI) && + (lport->en_act_topo == UNF_ACT_TOP_PRIVATE_LOOP)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) receive FLOGI in top (0x%x) and send LS_RJT", + lport->port_id, lport->en_act_topo); + + rjt_info.els_cmnd_code = ELS_FLOGI; + (void)unf_send_els_rjt_by_did(lport, xchg, sid, &rjt_info); + + return UNF_RETURN_ERROR; + } + + if ((ppkg->cmnd == ELS_PLOGI) && (did >= UNF_FC_FID_DOM_MGR)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x)receive PLOGI with wellknown address(0x%x) and Send LS_RJT", + lport->port_id, did); + + rjt_info.els_cmnd_code = ELS_PLOGI; + (void)unf_send_els_rjt_by_did(lport, xchg, sid, &rjt_info); + + return UNF_RETURN_ERROR; + } + + if (((lport->nport_id == 0) || + (lport->nport_id == INVALID_VALUE32)) && + (NEED_REFRESH_NPORTID(ppkg))) { + lport->nport_id = did; + } else if ((did != lport->nport_id) && (ppkg->cmnd != ELS_FLOGI)) { + vport = unf_cm_lookup_vport_by_did(lport, did); + if (!vport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) receive ELS cmd(0x%x) with abnormal D_ID(0x%x)", + lport->nport_id, ppkg->cmnd, did); + + unf_cm_free_xchg(lport, xchg); + return UNF_RETURN_ERROR; + } + } + + return RETURN_OK; +} + +static unsigned int unf_rcv_els_cmnd_req(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_fra_pkg) +{ + struct unf_xchg_s *xchg = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int i = 0; + unsigned int sid = 0; + unsigned int did = 0; + struct unf_lport_s *vport = NULL; + unsigned int (*pfn_els_cmnd_handler)(struct unf_lport_s *, unsigned int, + struct unf_xchg_s *) = NULL; + + sid = (v_fra_pkg->frame_head.csctl_sid) & UNF_NPORTID_MASK; + did = (v_fra_pkg->frame_head.rctl_did) & UNF_NPORTID_MASK; + + xchg = unf_mv_data_2_xchg(v_lport, v_fra_pkg); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) receive ElsCmnd(0x%x), exchange is NULL", + v_lport->port_id, v_fra_pkg->cmnd); + + return UNF_RETURN_ERROR; + } + + if (v_fra_pkg->last_pkg_flag != UNF_TRUE) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Exchange(%u) waiting for last WQE", + 
xchg->hot_pool_tag);
+
+ return RETURN_OK;
+ }
+
+ UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO,
+ "[info]Exchange(%u) get last WQE", xchg->hot_pool_tag);
+
+ if (v_lport->low_level_func.xchg_mgr_type ==
+ UNF_LOW_LEVEL_MGR_TYPE_PASSTIVE) {
+ xchg->ox_id = UNF_GET_OXID(v_fra_pkg);
+ xchg->abort_oxid = xchg->ox_id;
+ xchg->rx_id = xchg->hot_pool_tag;
+ }
+ xchg->cmnd_code = v_fra_pkg->cmnd;
+
+ ret = unf_check_els_cmnd_valid(v_lport, v_fra_pkg, xchg);
+ if (ret != RETURN_OK) {
+ /* NOTE: exchange has been released */
+ return UNF_RETURN_ERROR;
+ }
+
+ if ((did != v_lport->nport_id) && (v_fra_pkg->cmnd != ELS_FLOGI)) {
+ vport = unf_cm_lookup_vport_by_did(v_lport, did);
+ if (!vport) {
+ UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN,
+ "[warn]Port(0x%x) received unknown ELS command with S_ID(0x%x) D_ID(0x%x)",
+ v_lport->port_id, sid, did);
+
+ return UNF_RETURN_ERROR;
+ }
+ v_lport = vport;
+ UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_INFO,
+ "[info]VPort(0x%x) received ELS command with S_ID(0x%x) D_ID(0x%x)",
+ v_lport->port_id, sid, did);
+ }
+
+ do {
+ if ((v_fra_pkg->cmnd) == els_handle[i].cmnd) {
+ pfn_els_cmnd_handler =
+ els_handle[i].pfn_els_cmnd_handler;
+ break;
+ }
+
+ i++;
+ } while (i < (sizeof(els_handle) /
+ sizeof(struct unf_els_handler_table)));
+
+ if (pfn_els_cmnd_handler) {
+ UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT,
+ UNF_INFO,
+ "[info]Port(0x%x) receive ELS(0x%x) from RPort(0x%x) and process it",
+ v_lport->port_id, v_fra_pkg->cmnd, sid);
+
+ ret = pfn_els_cmnd_handler(v_lport, sid, xchg);
+ } else {
+ ret = unf_els_cmnd_default_handler(v_lport, xchg, sid,
+ v_fra_pkg->cmnd);
+ }
+
+ return ret;
+}
+
+static unsigned int unf_send_els_rsp_succ(struct unf_lport_s *v_lport,
+ struct unf_frame_pkg_s *v_fra_pkg)
+{
+ struct unf_xchg_s *xchg = NULL;
+ unsigned int ret = RETURN_OK;
+ unsigned short hot_pool_tag = 0;
+ unsigned long flags = 0;
+ void (*pfn_ob_callback)(struct unf_xchg_s *) = NULL;
+
+ UNF_CHECK_VALID(0x3529, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
+ UNF_CHECK_VALID(0x3530, UNF_TRUE, v_fra_pkg, return UNF_RETURN_ERROR);
+
+ if (!v_lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag) {
+ UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
+ "[err]Port(0x%x) lookup exchange by tag function is NULL",
+ v_lport->port_id);
+
+ return UNF_RETURN_ERROR;
+ }
+
+ hot_pool_tag = (unsigned short)
+ (v_fra_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]);
+ xchg = (struct unf_xchg_s *)
+ (v_lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag(
+ (void *)v_lport,
+ hot_pool_tag));
+ if (!xchg) {
+ UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
+ "[err]Port(0x%x) find exchange by tag(0x%x) failed",
+ v_lport->port_id, hot_pool_tag);
+
+ return UNF_RETURN_ERROR;
+ }
+
+ v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)xchg);
+
+ spin_lock_irqsave(&xchg->xchg_state_lock, flags);
+ if ((xchg->pfn_ob_callback) &&
+ (!(xchg->io_state & TGT_IO_STATE_ABORT))) {
+ pfn_ob_callback = xchg->pfn_ob_callback;
+ spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
+
+ UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO,
+ "[info]Port(0x%x) with exchange(0x%p) tag(%u) do callback",
+ v_lport->port_id, xchg, hot_pool_tag);
+
+ pfn_ob_callback(xchg);
+ } else {
+ spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
+ }
+
+ unf_cm_free_xchg((void *)v_lport, (void *)xchg);
+ return ret;
+}
+
+static unsigned char *unf_calc_big_resp_pld_buffer(struct unf_xchg_s *v_xchg,
+ unsigned int v_cmnd_code)
+{
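+ /* Pick the reassembly buffer for a (possibly multi-frame) response:
+ * ECHO data lands in the exchange's own echo payload, while large CT
+ * responses (e.g. GID_* ACC) use the dedicated gid_acc_pld buffer;
+ * sfs_entry.cur_offset tracks how many bytes have been copied so far.
+ */
+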
unsigned char *resp_pld = NULL; + unsigned char *dest = NULL; + + UNF_CHECK_VALID(0x3510, UNF_TRUE, v_xchg, return NULL); + + if (v_cmnd_code == ELS_ECHO) + resp_pld = (unsigned char *) + v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo.echo_pld; + else + resp_pld = (unsigned char *) + v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->get_id.gid_rsp.gid_acc_pld; + + if (resp_pld) + dest = (unsigned char *) + (resp_pld + v_xchg->fcp_sfs_union.sfs_entry.cur_offset); + + return dest; +} + +static unsigned char *unf_calc_other_resp_pld_buffer(struct unf_xchg_s *v_xchg) +{ + unsigned char *dest = NULL; + unsigned int offset = 0; + + UNF_CHECK_VALID(0x3511, UNF_TRUE, v_xchg, return NULL); + + offset = v_xchg->fcp_sfs_union.sfs_entry.cur_offset; + dest = (unsigned char *)((unsigned char *) + (v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) + offset); + + return dest; +} + +static unsigned int unf_mv_resp_2_xchg(struct unf_xchg_s *v_xchg, + struct unf_frame_pkg_s *v_pkg) +{ + unsigned char *dest = NULL; + unsigned int length = 0; + unsigned int offset = 0; + unsigned int max_frame_len = 0; + unsigned long flags = 0; + + spin_lock_irqsave(&v_xchg->xchg_state_lock, flags); + + if (UNF_NEED_BIG_RESPONSE_BUFF(v_xchg->cmnd_code)) { + dest = unf_calc_big_resp_pld_buffer(v_xchg, + v_xchg->cmnd_code); + offset = 0; + max_frame_len = sizeof(struct unf_gif_acc_pld_s); + } else if (v_xchg->cmnd_code == NS_GA_NXT || + v_xchg->cmnd_code == NS_GIEL) { + dest = unf_calc_big_resp_pld_buffer(v_xchg, + v_xchg->cmnd_code); + offset = 0; + max_frame_len = + v_xchg->fcp_sfs_union.sfs_entry.sfs_buff_len; + } else { + dest = unf_calc_other_resp_pld_buffer(v_xchg); + offset = sizeof(struct unf_fchead_s); + max_frame_len = sizeof(union unf_sfs_u); + } + + if (!dest) { + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags); + + return UNF_RETURN_ERROR; + } + + if (v_xchg->fcp_sfs_union.sfs_entry.cur_offset == 0) { + v_xchg->fcp_sfs_union.sfs_entry.cur_offset += offset; + dest = dest + offset; + } + + length = v_pkg->unf_cmnd_pload_bl.length; + + if ((v_xchg->fcp_sfs_union.sfs_entry.cur_offset + length) > + max_frame_len) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Exchange(0x%p) command(0x%x) hotpooltag(0x%x) OX_RX_ID(0x%x) S_ID(0x%x) D_ID(0x%x) copy payload overrun(0x%x:0x%x:0x%x)", + v_xchg, v_xchg->cmnd_code, v_xchg->hot_pool_tag, + v_pkg->frame_head.oxid_rxid, + v_pkg->frame_head.csctl_sid & UNF_NPORTID_MASK, + v_pkg->frame_head.rctl_did & UNF_NPORTID_MASK, + v_xchg->fcp_sfs_union.sfs_entry.cur_offset, + v_pkg->unf_cmnd_pload_bl.length, + max_frame_len); + + length = max_frame_len - v_xchg->fcp_sfs_union.sfs_entry.cur_offset; + } + + if (length > 0) + memcpy(dest, v_pkg->unf_cmnd_pload_bl.buffer_ptr, length); + + v_xchg->fcp_sfs_union.sfs_entry.cur_offset += length; + spin_unlock_irqrestore(&v_xchg->xchg_state_lock, flags); + + return RETURN_OK; +} + +static unsigned int unf_send_els_cmnd_succ(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_fra_pkg) +{ + struct unf_xchg_s *xchg = NULL; + unsigned int ret = RETURN_OK; + unsigned short hot_pool_tag = 0; + unsigned long flags = 0; + void (*pfn_callback)(void *, void *, void *) = NULL; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x3531, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3532, UNF_TRUE, v_fra_pkg, return UNF_RETURN_ERROR); + lport = v_lport; + + if (!lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + 
"[err]Port(0x%x) lookup exchange by tag function can't be NULL", + lport->port_id); + + return UNF_RETURN_ERROR; + } + + hot_pool_tag = (unsigned short) + (v_fra_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]); + xchg = (struct unf_xchg_s *) + (lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag((void *)lport, + hot_pool_tag)); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) find exchange by tag(0x%x) failed", + lport->port_id, lport->nport_id, hot_pool_tag); + + return UNF_RETURN_ERROR; + } + + UNF_CHECK_ALLOCTIME_VALID( + lport, hot_pool_tag, xchg, + v_fra_pkg->private[PKG_PRIVATE_XCHG_ALLOC_TIME], + xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]); + + if (((v_fra_pkg->frame_head.csctl_sid) & UNF_NPORTID_MASK) != + xchg->did) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) find exhange invalid, package S_ID(0x%x) exchange S_ID(0x%x) D_ID(0x%x)", + lport->port_id, v_fra_pkg->frame_head.csctl_sid, + xchg->sid, xchg->did); + + return UNF_RETURN_ERROR; + } + + if (v_fra_pkg->last_pkg_flag == UNF_PKG_NOT_LAST_RESPONSE) { + ret = unf_mv_resp_2_xchg(xchg, v_fra_pkg); + + return ret; + } + + xchg->byte_orders = v_fra_pkg->byte_orders; + lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)xchg); + + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + if ((xchg->pfn_callback) && + ((xchg->cmnd_code == ELS_RRQ) || + (xchg->cmnd_code == ELS_LOGO) || + (!(xchg->io_state & TGT_IO_STATE_ABORT)))) { + pfn_callback = xchg->pfn_callback; + + if ((xchg->cmnd_code == ELS_FLOGI) || + (xchg->cmnd_code == ELS_FDISC)) + xchg->sid = v_fra_pkg->frame_head.rctl_did & + UNF_NPORTID_MASK; + + if (xchg->cmnd_code == ELS_ECHO) { + xchg->private[PKG_PRIVATE_ECHO_CMD_RCV_TIME] = + v_fra_pkg->private[PKG_PRIVATE_ECHO_CMD_RCV_TIME]; + xchg->private[PKG_PRIVATE_ECHO_RSP_SND_TIME] = + v_fra_pkg->private[PKG_PRIVATE_ECHO_RSP_SND_TIME]; + xchg->private[PKG_PRIVATE_ECHO_CMD_SND_TIME] = + v_fra_pkg->private[PKG_PRIVATE_ECHO_CMD_SND_TIME]; + xchg->private[PKG_PRIVATE_ECHO_ACC_RCV_TIME] = + v_fra_pkg->private[PKG_PRIVATE_ECHO_ACC_RCV_TIME]; + } + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + /* Do callback */ + pfn_callback(xchg->lport, xchg->rport, xchg); + } else { + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + } + + unf_cm_free_xchg((void *)lport, (void *)xchg); + return ret; +} + +static unsigned int unf_send_els_cmnd_failed(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_fra_pkg) +{ + struct unf_xchg_s *xchg = NULL; + unsigned int ret = RETURN_OK; + unsigned short hot_pool_tag = 0; + unsigned long flags = 0; + void (*pfn_ob_callback)(struct unf_xchg_s *) = NULL; + + UNF_CHECK_VALID(0x3533, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3534, UNF_TRUE, v_fra_pkg, return UNF_RETURN_ERROR); + + if (!v_lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) lookup exchange by tag function can't be NULL", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + hot_pool_tag = (unsigned short) + (v_fra_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]); + xchg = (struct unf_xchg_s *) + (v_lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag( + (void *)v_lport, + hot_pool_tag)); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) find exhange by tag(0x%x) failed", + v_lport->port_id, v_lport->nport_id, hot_pool_tag); + + return UNF_RETURN_ERROR; + } + + 
UNF_CHECK_ALLOCTIME_VALID(
+ v_lport, hot_pool_tag, xchg,
+ v_fra_pkg->private[PKG_PRIVATE_XCHG_ALLOC_TIME],
+ xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]);
+
+ v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)xchg);
+
+ spin_lock_irqsave(&xchg->xchg_state_lock, flags);
+ if ((xchg->pfn_ob_callback) &&
+ ((xchg->cmnd_code == ELS_RRQ) ||
+ (xchg->cmnd_code == ELS_LOGO) ||
+ (!(xchg->io_state & TGT_IO_STATE_ABORT)))) {
+ pfn_ob_callback = xchg->pfn_ob_callback;
+ xchg->ob_callback_sts = v_fra_pkg->status;
+ spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
+
+ pfn_ob_callback(xchg);
+
+ UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO,
+ "[info]Port(0x%x) exchange(0x%p) tag(0x%x) do callback",
+ v_lport->port_id, xchg, hot_pool_tag);
+ } else {
+ spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
+ }
+
+ unf_cm_free_xchg((void *)v_lport, (void *)xchg);
+ return ret;
+}
+
+static unsigned int unf_rcv_els_cmnd_reply(struct unf_lport_s *v_lport,
+ struct unf_frame_pkg_s *v_fra_pkg)
+{
+ unsigned int ret = RETURN_OK;
+
+ UNF_CHECK_VALID(0x3535, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
+ UNF_CHECK_VALID(0x3536, UNF_TRUE, v_fra_pkg, return UNF_RETURN_ERROR);
+
+ if ((v_fra_pkg->status == UNF_IO_SUCCESS) ||
+ (v_fra_pkg->status == UNF_IO_UNDER_FLOW))
+ ret = unf_send_els_cmnd_succ(v_lport, v_fra_pkg);
+ else
+ ret = unf_send_els_cmnd_failed(v_lport, v_fra_pkg);
+
+ return ret;
+}
+
+void unf_lport_enter_msn_plogi(struct unf_lport_s *v_lport)
+{
+ /* Fabric or Public Loop Mode: Login with Name server */
+ struct unf_lport_s *lport = v_lport;
+ struct unf_rport_s *rport = NULL;
+ unsigned long flag = 0;
+ unsigned int ret = UNF_RETURN_ERROR;
+ struct unf_plogi_payload_s *plogi_pld = NULL;
+ union unf_sfs_u *fc_entry = NULL;
+ struct unf_xchg_s *xchg = NULL;
+ struct unf_frame_pkg_s pkg;
+
+ UNF_CHECK_VALID(0x1811, UNF_TRUE, v_lport, return);
+
+ /* Get (safe) R_Port */
+ rport = unf_rport_get_free_and_init(v_lport, UNF_PORT_TYPE_FC,
+ UNF_FC_FID_MGMT_SERV);
+ if (!rport) {
+ UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT,
+ UNF_WARN,
+ "[warn]Port(0x%x) allocate RPort failed",
+ v_lport->port_id);
+ return;
+ }
+
+ spin_lock_irqsave(&rport->rport_state_lock, flag);
+ rport->nport_id = UNF_FC_FID_MGMT_SERV; // 0xfffffa
+ spin_unlock_irqrestore(&rport->rport_state_lock, flag);
+
+ memset(&pkg, 0, sizeof(struct unf_frame_pkg_s));
+
+ /* Get & Set new free exchange */
+ xchg = unf_cm_get_free_xchg(v_lport, UNF_XCHG_TYPE_SFS);
+ if (!xchg) {
+ UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
+ "[err]Port(0x%x) exchange can't be NULL for PLOGI",
+ v_lport->port_id);
+
+ return;
+ }
+
+ xchg->cmnd_code = ELS_PLOGI; // PLOGI
+ xchg->did = rport->nport_id;
+ xchg->sid = v_lport->nport_id;
+ xchg->oid = xchg->sid;
+ xchg->lport = lport;
+ xchg->rport = rport;
+
+ if (v_lport->low_level_func.xchg_mgr_type ==
+ UNF_LOW_LEVEL_MGR_TYPE_PASSTIVE)
+ xchg->ox_id = xchg->hot_pool_tag;
+
+ /* Set callback function */
+ xchg->pfn_callback = NULL; // for received PLOGI ACC/RJT processing
+ xchg->pfn_ob_callback = NULL; // for send PLOGI failed processing
+
+ unf_fill_package(&pkg, xchg, rport);
+
+ /* Fill PLOGI payload */
+ fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr;
+ if (!fc_entry) {
+ UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
+ "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)",
+ v_lport->port_id, xchg->hot_pool_tag);
+
+ unf_cm_free_xchg(v_lport, xchg);
+ return;
+ }
+
+ plogi_pld = &fc_entry->plogi.payload;
+ memset(plogi_pld, 0,
sizeof(struct unf_plogi_payload_s));
+ unf_fill_plogi_pld(plogi_pld, v_lport);
+
+ /* Start to Send PLOGI command */
+ ret = unf_els_cmnd_send(v_lport, &pkg, xchg);
+ if (ret != RETURN_OK)
+ unf_cm_free_xchg((void *)v_lport, (void *)xchg);
+}
+
+static void unf_register_to_switch(struct unf_lport_s *v_lport)
+{
+ /* Register to Fabric, used for: FABRIC & PUBLIC LOOP */
+ unsigned long flag = 0;
+
+ UNF_CHECK_VALID(0x3542, UNF_TRUE, v_lport, return);
+
+ spin_lock_irqsave(&v_lport->lport_state_lock, flag);
+ /* LPort: FLOGI_WAIT --> PLOGI_WAIT */
+ unf_lport_stat_ma(v_lport, UNF_EVENT_LPORT_REMOTE_ACC);
+ spin_unlock_irqrestore(&v_lport->lport_state_lock, flag);
+
+ /* Login with Name server: PLOGI */
+ unf_lport_enter_sns_plogi(v_lport);
+
+ unf_lport_enter_msn_plogi(v_lport);
+
+ if ((v_lport->root_lport == v_lport) && /* Physical Port */
+ (v_lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC)) {
+ unf_linkup_all_vports(v_lport);
+ }
+}
+
+void unf_login_with_loop_node(struct unf_lport_s *v_lport, unsigned int v_alpa)
+{
+ /* Only used for Private Loop LOGIN */
+ struct unf_rport_s *rport = NULL;
+ unsigned long rport_flag = 0;
+ unsigned int port_feature = 0;
+ unsigned int ret;
+
+ /* Check AL_PA validity */
+ if (v_lport->nport_id == v_alpa) {
+ UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO,
+ "[info]Port(0x%x) is the same as RPort with AL_PA(0x%x), do nothing",
+ v_lport->port_id, v_alpa);
+ return;
+ }
+
+ if (v_alpa == 0) {
+ UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
+ "[warn]Port(0x%x) RPort(0x%x) is fabric, do nothing",
+ v_lport->port_id, v_alpa);
+ return;
+ }
+
+ /* Get & set R_Port: reuse only */
+ rport = unf_get_rport_by_nport_id(v_lport, v_alpa);
+
+ UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR,
+ "[info]LOGIN: Port(0x%x_0x%x) RPort(0x%x_0x%p) login with private loop",
+ v_lport->port_id, v_lport->nport_id, v_alpa, rport);
+
+ rport = unf_get_safe_rport(v_lport, rport, UNF_RPORT_REUSE_ONLY,
+ v_alpa);
+ if (!rport) {
+ UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
+ "[warn]Port(0x%x_0x%x) allocate new RPort(0x%x) failed",
+ v_lport->port_id, v_lport->nport_id, v_alpa);
+ return;
+ }
+
+ /* Update R_Port state & N_Port_ID */
+ spin_lock_irqsave(&rport->rport_state_lock, rport_flag);
+ rport->nport_id = v_alpa;
+ unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); // PLOGI_WAIT
+ spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag);
+
+ /* Private Loop: check whether PLOGI needs to be delayed or not */
+ port_feature = rport->options;
+
+ /* check Rport and Lport feature */
+ if ((port_feature == UNF_PORT_MODE_UNKNOWN) &&
+ (v_lport->options == UNF_PORT_MODE_INI)) {
+ /* Start to send PLOGI */
+ ret = unf_send_plogi(v_lport, rport);
+ if (ret != RETURN_OK) {
+ UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT,
+ UNF_WARN,
+ "[warn]LOGIN: Port(0x%x_0x%x) send PLOGI to RPort(0x%x) failed",
+ v_lport->port_id, v_lport->nport_id,
+ rport->nport_id);
+
+ unf_rport_error_recovery(rport);
+ }
+ } else {
+ unf_check_rport_need_delay_plogi(v_lport, rport, port_feature);
+ }
+}
+
+unsigned int unf_receive_els_pkg(void *v_lport,
+ struct unf_frame_pkg_s *v_fra_pkg)
+{
+ struct unf_lport_s *lport = NULL;
+ unsigned int ret = UNF_RETURN_ERROR;
+
+ UNF_CHECK_VALID(0x3543, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
+ UNF_CHECK_VALID(0x3544, UNF_TRUE, v_fra_pkg, return UNF_RETURN_ERROR);
+ lport = (struct unf_lport_s *)v_lport;
+
+ switch (v_fra_pkg->type) {
+ case UNF_PKG_ELS_REQ_DONE:
+ ret =
unf_rcv_els_cmnd_reply(lport, v_fra_pkg);
+ break;
+
+ case UNF_PKG_ELS_REQ:
+ ret = unf_rcv_els_cmnd_req(lport, v_fra_pkg);
+ break;
+
+ default:
+ UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
+ "[warn]Port(0x%x_0x%x) with exchange type(0x%x) abnormal",
+ lport->port_id, lport->nport_id, v_fra_pkg->type);
+ break;
+ }
+
+ return ret;
+}
+
+unsigned int unf_send_els_done(void *v_lport, struct unf_frame_pkg_s *v_pkg)
+{
+ unsigned int ret = UNF_RETURN_ERROR;
+
+ UNF_CHECK_VALID(0x3545, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
+ UNF_CHECK_VALID(0x3546, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR);
+
+ if (v_pkg->type == UNF_PKG_ELS_REPLY_DONE) {
+ if ((v_pkg->status == UNF_IO_SUCCESS) ||
+ (v_pkg->status == UNF_IO_UNDER_FLOW))
+ ret = unf_send_els_rsp_succ(v_lport, v_pkg);
+ else
+ ret = unf_send_els_cmnd_failed(v_lport, v_pkg);
+ }
+
+ return ret;
+}
+
+static unsigned int unf_rcv_gs_cmnd_reply(struct unf_lport_s *v_lport,
+ struct unf_frame_pkg_s *v_fra_pkg)
+{
+ struct unf_xchg_s *xchg = NULL;
+ unsigned long flags = 0;
+ unsigned short hot_pool_tag = 0;
+ unsigned int ret = RETURN_OK;
+ struct unf_lport_s *lport = NULL;
+ void (*pfn_callback)(void *, void *, void *) = NULL;
+
+ UNF_CHECK_VALID(0x3553, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
+ UNF_CHECK_VALID(0x3554, UNF_TRUE, v_fra_pkg, return UNF_RETURN_ERROR);
+ lport = v_lport;
+ hot_pool_tag = (unsigned short)
+ (v_fra_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]);
+
+ xchg = (struct unf_xchg_s *)unf_cm_lookup_xchg_by_tag(
+ (void *)lport, hot_pool_tag);
+ if (!xchg) {
+ UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
+ "[warn]Port(0x%x) find exchange by tag(0x%x) failed",
+ lport->port_id, hot_pool_tag);
+
+ return UNF_RETURN_ERROR;
+ }
+
+ UNF_CHECK_ALLOCTIME_VALID(
+ lport, hot_pool_tag, xchg,
+ v_fra_pkg->private[PKG_PRIVATE_XCHG_ALLOC_TIME],
+ xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]);
+
+ if (v_fra_pkg->last_pkg_flag == UNF_PKG_NOT_LAST_RESPONSE) {
+ ret = unf_mv_resp_2_xchg(xchg, v_fra_pkg);
+ return ret;
+ }
+
+ lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)xchg);
+
+ spin_lock_irqsave(&xchg->xchg_state_lock, flags);
+ if ((xchg->pfn_callback) && (!(xchg->io_state & TGT_IO_STATE_ABORT))) {
+ pfn_callback = xchg->pfn_callback;
+ spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
+
+ pfn_callback(xchg->lport, xchg->rport, xchg);
+ } else {
+ spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
+ }
+
+ unf_cm_free_xchg((void *)lport, (void *)xchg);
+ return ret;
+}
+
+static unsigned int unf_send_gs_cmnd_failed(struct unf_lport_s *v_lport,
+ struct unf_frame_pkg_s *v_fra_pkg)
+{
+ struct unf_xchg_s *xchg = NULL;
+ unsigned int ret = RETURN_OK;
+ unsigned short hot_pool_tag = 0;
+ unsigned long flags = 0;
+
+ void (*pfn_ob_callback)(struct unf_xchg_s *) = NULL;
+
+ UNF_CHECK_VALID(0x3555, UNF_TRUE, v_lport, return UNF_RETURN_ERROR);
+ UNF_CHECK_VALID(0x3556, UNF_TRUE, v_fra_pkg, return UNF_RETURN_ERROR);
+
+ if (!v_lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag) {
+ UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR,
+ "[err]Port(0x%x) lookup exchange by tag function can't be NULL",
+ v_lport->port_id);
+
+ return UNF_RETURN_ERROR;
+ }
+
+ hot_pool_tag = (unsigned short)
+ (v_fra_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]);
+ xchg = (struct unf_xchg_s *)
+ (v_lport->xchg_mgr_temp.pfn_unf_look_up_xchg_by_tag(
+ (void *)v_lport,
+ hot_pool_tag));
+
+ if (!xchg) {
+ UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
+ "[warn]Port(0x%x) can't find
exhange by tag(0x%x)", + v_lport->port_id, hot_pool_tag); + + return UNF_RETURN_ERROR; + } + + UNF_CHECK_ALLOCTIME_VALID( + v_lport, hot_pool_tag, xchg, + v_fra_pkg->private[PKG_PRIVATE_XCHG_ALLOC_TIME], + xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]); + + v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)xchg); + + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + if ((xchg->pfn_ob_callback) && + (!(xchg->io_state & TGT_IO_STATE_ABORT))) { + pfn_ob_callback = xchg->pfn_ob_callback; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + pfn_ob_callback(xchg); + } else { + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + } + + unf_cm_free_xchg((void *)v_lport, (void *)xchg); + return ret; +} + +unsigned int unf_receive_gs_pkg(void *v_lport, + struct unf_frame_pkg_s *v_fra_pkg) +{ + struct unf_lport_s *lport = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3557, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3558, UNF_TRUE, v_fra_pkg, return UNF_RETURN_ERROR); + + lport = (struct unf_lport_s *)v_lport; + + if ((v_fra_pkg->type) == UNF_PKG_GS_REQ_DONE) { + if ((v_fra_pkg->status == UNF_IO_SUCCESS) || + (v_fra_pkg->status == UNF_IO_UNDER_FLOW) || + (v_fra_pkg->status == UNF_IO_OVER_FLOW)) + ret = unf_rcv_gs_cmnd_reply(lport, v_fra_pkg); + else + ret = unf_send_gs_cmnd_failed(lport, v_fra_pkg); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) exchange type(0x%x) mismatch", + lport->port_id, v_fra_pkg->type); + + return UNF_RETURN_ERROR; + } + + return ret; +} + +static void unf_handle_init_gid_acc(struct unf_gif_acc_pld_s *v_gid_acc_pld, + struct unf_lport_s *v_lport) +{ + /* + * from SCR ACC callback + * NOTE: inquiry disc R_Port used for NPIV + */ + struct unf_disc_rport_s *disc_rport = NULL; + struct unf_disc_s *disc = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned int gid_port_id = 0; + unsigned int nport_id = 0; + unsigned int i = 0; + unsigned char control = 0; + + UNF_CHECK_VALID(0x3559, UNF_TRUE, v_gid_acc_pld, return); + UNF_CHECK_VALID(0x3560, UNF_TRUE, v_lport, return); + + /* + * 1. Find & Check & Get (new) R_Port from list_disc_rports_pool + * then, Add to R_Port Disc_busy_list + */ + while (i < UNF_GID_PORT_CNT) { + gid_port_id = (v_gid_acc_pld->gid_port_id[i]); + nport_id = UNF_NPORTID_MASK & gid_port_id; + control = UNF_GID_CONTROL(gid_port_id); + + /* for each N_Port_ID from GID_ACC payload */ + if ((nport_id != v_lport->nport_id) && (nport_id != 0) && + (!unf_lookup_lport_by_nport_id(v_lport, nport_id))) { + /* for New Port, not L_Port */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port(0x%x_0x%x) get nportid(0x%x) from GID_ACC", + v_lport->port_id, v_lport->nport_id, + nport_id); + + /* Get R_Port from list of RPort Disc Pool */ + disc_rport = + unf_rport_get_free_and_init(v_lport, + UNF_PORT_TYPE_DISC, + nport_id); + if (!disc_rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) can't allocate new rport(0x%x) from disc pool", + v_lport->port_id, + v_lport->nport_id, + nport_id); + + i++; + continue; + } + } + + if ((control & UNF_GID_LAST_PORT_ID) == UNF_GID_LAST_PORT_ID) + break; + + i++; + } + + /* + * 2. 
Do port disc stop operation: + * NOTE: Do DISC & release R_Port from + * busy_list back to list_disc_rports_pool + */ + disc = &v_lport->disc; + if (!disc->unf_disc_temp.pfn_unf_disc_stop) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) disc stop function is NULL", + v_lport->port_id, v_lport->nport_id); + + return; + } + + ret = disc->unf_disc_temp.pfn_unf_disc_stop(v_lport); + if (ret != RETURN_OK) + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) do disc stop failed", + v_lport->port_id, v_lport->nport_id); +} + +void unf_rport_immediate_linkdown(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* Swap case: Report Link Down immediately & release R_Port */ + unsigned long flags = 0; + struct unf_disc_s *disc = NULL; + + UNF_CHECK_VALID(0x3561, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3562, UNF_TRUE, v_rport, return); + + spin_lock_irqsave(&v_rport->rport_state_lock, flags); + /* 1. Inc R_Port ref_cnt */ + if (unf_rport_ref_inc(v_rport) != RETURN_OK) { + spin_unlock_irqrestore(&v_rport->rport_state_lock, flags); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) Rport(0x%p,0x%x) is removing and no need process", + v_lport->port_id, v_rport, v_rport->nport_id); + + return; + } + + /* 2. R_PORT state update: Link Down Event --->>> closing state */ + unf_rport_state_ma(v_rport, UNF_EVENT_RPORT_LINK_DOWN); + spin_unlock_irqrestore(&v_rport->rport_state_lock, flags); + + /* 3. Put R_Port from busy to destroy list */ + disc = &v_lport->disc; + spin_lock_irqsave(&disc->rport_busy_pool_lock, flags); + list_del_init(&v_rport->entry_rport); + list_add_tail(&v_rport->entry_rport, &disc->list_destroy_rports); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flags); + + /* 4. 
Schedule Closing work (Enqueuing workqueue) */ + unf_schedule_closing_work(v_lport, v_rport); + + unf_rport_ref_dec(v_rport); +} + +static unsigned int unf_rport_check_wwn(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* Send GPN_ID */ + struct unf_rport_s *sns_port = NULL; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3564, UNF_TRUE, v_lport, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3565, UNF_TRUE, v_rport, + return UNF_RETURN_ERROR); + + /* Get SNS R_Port */ + sns_port = unf_get_rport_by_nport_id(v_lport, + UNF_FC_FID_DIR_SERV); + if (!sns_port) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) can't find fabric Port", + v_lport->nport_id); + + return UNF_RETURN_ERROR; + } + + /* Send GPN_ID to SW */ + ret = unf_get_and_post_disc_event(v_lport, sns_port, v_rport->nport_id, + UNF_DISC_GET_PORT_NAME); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", + v_lport->nport_id, UNF_DISC_GET_PORT_NAME, + v_rport->nport_id); + + unf_rcv_gpn_id_rsp_unknown(v_lport, v_rport->nport_id); + } + + return ret; +} + +static unsigned int unf_handle_rscn_port_not_in_disc( + struct unf_lport_s *v_lport, + unsigned int v_rscn_nport_id) +{ + /* RSCN Port_ID not in GID_ACC payload table: Link Down */ + struct unf_rport_s *rport = NULL; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3566, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + /* from R_Port busy list by N_Port_ID */ + rport = unf_get_rport_by_nport_id(v_lport, v_rscn_nport_id); + if (rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_KEVENT, + "[info]Port(0x%x) RPort(0x%x) wwpn(0x%llx) has been removed and link down it", + v_lport->port_id, v_rscn_nport_id, + rport->port_name); + + unf_rport_linkdown(v_lport, rport); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) has no RPort(0x%x) and do nothing", + v_lport->nport_id, v_rscn_nport_id); + } + + return ret; +} + +static unsigned int unf_handle_rscn_port_in_disc(struct unf_lport_s *v_lport, + unsigned int v_rscn_nport_id) +{ + /* Send GPN_ID or re-login(GNN_ID) */ + struct unf_rport_s *rport = NULL; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3567, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + /* from R_Port busy list by N_Port_ID */ + rport = unf_get_rport_by_nport_id(v_lport, v_rscn_nport_id); + if (rport) { + /* R_Port exist: send GPN_ID */ + ret = unf_rport_check_wwn(v_lport, rport); + } else { + if ((v_lport->options & UNF_PORT_MODE_INI) == + UNF_PORT_MODE_INI) { + /* Re-LOGIN with INI mode: Send GNN_ID */ + ret = unf_rport_relogin(v_lport, v_rscn_nport_id); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port(0x%x) with no INI feature. Do nothing", + v_lport->nport_id); + } + } + + return ret; +} + +static unsigned int unf_handle_rscn_port_addr( + struct unf_port_id_page_s *v_port_id_page, + struct unf_gif_acc_pld_s *v_gid_acc_pld, + struct unf_lport_s *v_lport) +{ + /* + * Input parameters: + * 1. Port_ID_page: saved from RSCN payload + * 2. 
GID_ACC_payload: back from GID_ACC (GID_PT or GID_FT) + ** + * Do work: check whether RSCN Port_ID within GID_ACC payload or not + * then, re-login or link down rport + */ + unsigned int rscn_nport_id = 0; + unsigned int gid_port_id = 0; + unsigned int nport_id = 0; + unsigned int i = 0; + unsigned char control = 0; + unsigned int ret = RETURN_OK; + enum int_e have_same_id = UNF_FALSE; + + UNF_CHECK_VALID(0x3568, UNF_TRUE, v_port_id_page, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3569, UNF_TRUE, v_gid_acc_pld, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3570, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + /* 1. get RSCN_NPort_ID from (L_Port->Disc->RSCN_Mgr)->RSCN_Port_ID_Page */ + rscn_nport_id = UNF_SERVICE_GET_NPORTID_FORM_GID_PAGE(v_port_id_page); + + /* + * 2. for RSCN_NPort_ID + * check whether RSCN_NPort_ID within GID_ACC_Payload or not + */ + while (i < UNF_GID_PORT_CNT) { /* 4k */ + gid_port_id = (v_gid_acc_pld->gid_port_id[i]); + nport_id = UNF_NPORTID_MASK & gid_port_id; + control = UNF_GID_CONTROL(gid_port_id); + + if ((v_lport->nport_id != nport_id) && (nport_id != 0)) { + /* is not L_Port */ + if (rscn_nport_id == nport_id) { + /* RSCN Port_ID within GID_ACC payload */ + have_same_id = UNF_TRUE; + break; + } + } + + if ((control & UNF_GID_LAST_PORT_ID) == UNF_GID_LAST_PORT_ID) + break; + + i++; + } + + /* 3. RSCN_Port_ID not within GID_ACC payload table */ + if (have_same_id == UNF_FALSE) { + /* rport has been removed */ + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[warn]Port(0x%x_0x%x) find RSCN N_Port_ID(0x%x) in GID_ACC table failed", + v_lport->port_id, v_lport->nport_id, + rscn_nport_id); + + /* Link down rport */ + ret = unf_handle_rscn_port_not_in_disc(v_lport, + rscn_nport_id); + } else { /* 4. 
RSCN_Port_ID within GID_ACC payload table */ + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x_0x%x) find RSCN N_Port_ID(0x%x) in GID_ACC table succeed", + v_lport->port_id, v_lport->nport_id, + rscn_nport_id); + + /* Re-login with INI mode */ + ret = unf_handle_rscn_port_in_disc(v_lport, rscn_nport_id); + } + + return ret; +} + +static void unf_check_rport_rscn_process( + struct unf_rport_s *v_rport, + struct unf_port_id_page_s *v_port_id_page) +{ + struct unf_rport_s *rport = v_rport; + struct unf_port_id_page_s *port_id_page = v_port_id_page; + unsigned char format = port_id_page->uc_addr_format; + + switch (format) { + /* domain+area */ + case UNF_RSCN_AREA_ADDR_GROUP: + if (UNF_GET_DOMAIN_ID(rport->nport_id) == + port_id_page->port_id_domain && + UNF_GET_AREA_ID(rport->nport_id) == + port_id_page->port_id_area) { + rport->rscn_position = UNF_RPORT_NEED_PROCESS; + } + break; + /* domain */ + case UNF_RSCN_DOMAIN_ADDR_GROUP: + if (UNF_GET_DOMAIN_ID(rport->nport_id) == + port_id_page->port_id_domain) + rport->rscn_position = UNF_RPORT_NEED_PROCESS; + break; + /* all */ + case UNF_RSCN_FABRIC_ADDR_GROUP: + rport->rscn_position = UNF_RPORT_NEED_PROCESS; + break; + default: + break; + } +} + +static void unf_set_rport_rscn_position( + struct unf_lport_s *v_lport, + struct unf_port_id_page_s *v_port_id_page) +{ + struct unf_rport_s *rport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + struct unf_disc_s *disc = NULL; + unsigned long disc_flag = 0; + unsigned long rport_flag = 0; + + UNF_CHECK_VALID(0x3571, UNF_TRUE, v_lport, return); + disc = &v_lport->disc; + + spin_lock_irqsave(&disc->rport_busy_pool_lock, disc_flag); + list_for_each_safe(node, next_node, &disc->list_busy_rports) { + rport = list_entry(node, struct unf_rport_s, entry_rport); + spin_lock_irqsave(&rport->rport_state_lock, rport_flag); + + if (rport->nport_id < UNF_FC_FID_DOM_MGR) { + if (rport->rscn_position == UNF_RPORT_NOT_NEED_PROCESS) + unf_check_rport_rscn_process(rport, + v_port_id_page); + } else { + rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; + } + + spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); + } + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag); +} + +static void unf_set_rport_rscn_position_local(struct unf_lport_s *v_lport) +{ + struct unf_rport_s *rport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + struct unf_disc_s *disc = NULL; + unsigned long disc_flag = 0; + unsigned long rport_flag = 0; + + UNF_CHECK_VALID(0x3572, UNF_TRUE, v_lport, return); + disc = &v_lport->disc; + + spin_lock_irqsave(&disc->rport_busy_pool_lock, disc_flag); + list_for_each_safe(node, next_node, &disc->list_busy_rports) { + rport = list_entry(node, struct unf_rport_s, entry_rport); + spin_lock_irqsave(&rport->rport_state_lock, rport_flag); + + if (rport->nport_id < UNF_FC_FID_DOM_MGR) { + if (rport->rscn_position == UNF_RPORT_NEED_PROCESS) + rport->rscn_position = + UNF_RPORT_ONLY_IN_LOCAL_PROCESS; + } else { + rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; + } + + spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); + } + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag); +} + +static void unf_reset_rport_rscn_setting(struct unf_lport_s *v_lport) +{ + struct unf_rport_s *rport = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + struct unf_disc_s *disc = NULL; + unsigned long rport_flag = 0; + + UNF_CHECK_VALID(0x3573, UNF_TRUE, v_lport, 
return); + disc = &v_lport->disc; + + list_for_each_safe(node, next_node, &disc->list_busy_rports) { + rport = list_entry(node, struct unf_rport_s, entry_rport); + spin_lock_irqsave(&rport->rport_state_lock, rport_flag); + rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; + spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); + } +} + +static void unf_compare_nport_id_with_rport_list( + struct unf_lport_s *v_lport, + unsigned int v_nport_id, + struct unf_port_id_page_s *v_port_id_page) +{ + struct unf_rport_s *rport = NULL; + unsigned long rport_flag = 0; + unsigned char format = v_port_id_page->uc_addr_format; + + UNF_CHECK_VALID(0x3574, UNF_TRUE, v_lport, return); + + switch (format) { + /* domain+area */ + case UNF_RSCN_AREA_ADDR_GROUP: + if ((UNF_GET_DOMAIN_ID(v_nport_id) != + v_port_id_page->port_id_domain) || + (UNF_GET_AREA_ID(v_nport_id) != + v_port_id_page->port_id_area)) + return; + break; + /* domain */ + case UNF_RSCN_DOMAIN_ADDR_GROUP: + if (UNF_GET_DOMAIN_ID(v_nport_id) != + v_port_id_page->port_id_domain) + return; + break; + /* all */ + case UNF_RSCN_FABRIC_ADDR_GROUP: + break; + /* can't enter this branch guarantee by outer */ + default: + break; + } + + rport = unf_get_rport_by_nport_id(v_lport, v_nport_id); + + if (!rport) { + if ((v_lport->options & UNF_PORT_MODE_INI) == + UNF_PORT_MODE_INI) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_KEVENT, + "[event]Port(0x%x) Find Rport(0x%x) by RSCN", + v_lport->nport_id, v_nport_id); + unf_rport_relogin(v_lport, v_nport_id); + } + } else { + spin_lock_irqsave(&rport->rport_state_lock, rport_flag); + if (rport->rscn_position == UNF_RPORT_NEED_PROCESS) + rport->rscn_position = + UNF_RPORT_IN_DISC_AND_LOCAL_PROCESS; + + spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); + } +} + +static void unf_compare_disc_with_local_rport( + struct unf_lport_s *v_lport, + struct unf_gif_acc_pld_s *v_gid_acc_pld, + struct unf_port_id_page_s *v_port_id_page) +{ + unsigned int gid_port_id = 0; + unsigned int nport_id = 0; + unsigned int i = 0; + unsigned char control = 0; + + UNF_CHECK_VALID(0x3575, UNF_TRUE, v_gid_acc_pld, return); + UNF_CHECK_VALID(0x3576, UNF_TRUE, v_lport, return); + + while (i < UNF_GID_PORT_CNT) { + gid_port_id = (v_gid_acc_pld->gid_port_id[i]); + nport_id = UNF_NPORTID_MASK & gid_port_id; + control = UNF_GID_CONTROL(gid_port_id); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]Port(0x%x) DISC N_Port_ID(0x%x)", + v_lport->nport_id, nport_id); + + if ((nport_id != 0) && + (!unf_lookup_lport_by_nport_id(v_lport, nport_id))) + unf_compare_nport_id_with_rport_list(v_lport, nport_id, + v_port_id_page); + + if ((UNF_GID_LAST_PORT_ID & control) == UNF_GID_LAST_PORT_ID) + break; + + i++; + } + + unf_set_rport_rscn_position_local(v_lport); +} + +static unsigned int unf_process_each_rport_after_rscn( + struct unf_lport_s *v_lport, + struct unf_rport_s *v_sns_port, + struct unf_rport_s *v_rport) +{ + unsigned long rport_flag = 0; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3577, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3578, UNF_TRUE, v_sns_port, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3579, UNF_TRUE, v_sns_port, return UNF_RETURN_ERROR); + + UNF_REFERNCE_VAR(v_sns_port); + + spin_lock_irqsave(&v_rport->rport_state_lock, rport_flag); + + if (v_rport->rscn_position == UNF_RPORT_IN_DISC_AND_LOCAL_PROCESS) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_KEVENT, + "[info]Port(0x%x_0x%x) RPort(0x%x) rescan position(0x%x), 
check wwpn", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, v_rport->rscn_position); + v_rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; + spin_unlock_irqrestore(&v_rport->rport_state_lock, rport_flag); + ret = unf_rport_check_wwn(v_lport, v_rport); + } else if (v_rport->rscn_position == + UNF_RPORT_ONLY_IN_LOCAL_PROCESS) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_KEVENT, + "[event]Port(0x%x_0x%x) RPort(0x%x) rescan position(0x%x), linkdown it", + v_lport->port_id, v_lport->nport_id, + v_rport->nport_id, v_rport->rscn_position); + v_rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; + spin_unlock_irqrestore(&v_rport->rport_state_lock, rport_flag); + unf_rport_linkdown(v_lport, v_rport); + } else { + spin_unlock_irqrestore(&v_rport->rport_state_lock, rport_flag); + } + + return ret; +} + +static unsigned int unf_process_local_rport_after_rscn( + struct unf_lport_s *v_lport, + struct unf_rport_s *v_sns_port) +{ + struct unf_rport_s *rport = NULL; + struct list_head *node = NULL; + struct unf_disc_s *disc = NULL; + unsigned long disc_flag = 0; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3580, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3581, UNF_TRUE, v_sns_port, return UNF_RETURN_ERROR); + disc = &v_lport->disc; + + spin_lock_irqsave(&disc->rport_busy_pool_lock, disc_flag); + if (list_empty(&disc->list_busy_rports)) { + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag); + + return UNF_RETURN_ERROR; + } + + node = (&disc->list_busy_rports)->next; + + do { + rport = list_entry(node, struct unf_rport_s, entry_rport); + + if (rport->rscn_position == UNF_RPORT_NOT_NEED_PROCESS) { + node = node->next; + continue; + } else { + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, + disc_flag); + ret = unf_process_each_rport_after_rscn(v_lport, + v_sns_port, + rport); + spin_lock_irqsave(&disc->rport_busy_pool_lock, + disc_flag); + node = (&disc->list_busy_rports)->next; + } + } while (node != &disc->list_busy_rports); + + unf_reset_rport_rscn_setting(v_lport); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag); + + return ret; +} + +static unsigned int unf_handle_rscn_group_addr( + struct unf_port_id_page_s *v_port_id_page, + struct unf_gif_acc_pld_s *v_gid_acc_pld, + struct unf_lport_s *v_lport) +{ + struct unf_rport_s *sns_port = NULL; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3582, UNF_TRUE, v_port_id_page, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3583, UNF_TRUE, v_gid_acc_pld, + return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3584, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + + UNF_REFERNCE_VAR(v_port_id_page); + + sns_port = unf_get_rport_by_nport_id(v_lport, UNF_FC_FID_DIR_SERV); + if (!sns_port) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) find fabric port failed", + v_lport->port_id); + + return UNF_RETURN_ERROR; + } + + unf_set_rport_rscn_position(v_lport, v_port_id_page); + unf_compare_disc_with_local_rport(v_lport, v_gid_acc_pld, + v_port_id_page); + + ret = unf_process_local_rport_after_rscn(v_lport, sns_port); + return ret; +} + +static void unf_handle_rscn_gid_acc(struct unf_gif_acc_pld_s *v_gid_acc_pld, + struct unf_lport_s *v_lport) +{ + /* for N_Port_ID table return from RSCN */ + struct unf_port_id_page_s *port_id_page = NULL; + struct unf_rscn_mg_s *rscn_mgr = NULL; + struct list_head *list_node = NULL; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3585, UNF_TRUE, v_gid_acc_pld, return); + UNF_CHECK_VALID(0x3586, UNF_TRUE, 
v_lport, return);
+ rscn_mgr = &v_lport->disc.rscn_mgr;
+
+ spin_lock_irqsave(&rscn_mgr->rscn_id_list_lock, flag);
+ while (!list_empty(&rscn_mgr->list_using_rscn_page)) {
+ /*
+ * for each RSCN_Using_Page(NPortID)
+ * for each L_Port->Disc->RSCN_Mgr->
+ * RSCN_Using_Page(Port_ID_Page)
+ * NOTE:
+ * check using_page_port_id whether within
+ * GID_ACC payload or not
+ */
+ list_node = (&rscn_mgr->list_using_rscn_page)->next;
+ port_id_page = list_entry(list_node, struct unf_port_id_page_s,
+ list_node_rscn);
+ /* NOTE: here delete node (from RSCN using Page) */
+ list_del(list_node);
+ spin_unlock_irqrestore(&rscn_mgr->rscn_id_list_lock, flag);
+
+ switch (port_id_page->uc_addr_format) {
+ /* each RSCN page corresponds to a single N_Port_ID */
+ case UNF_RSCN_PORT_ADDR:
+ (void)unf_handle_rscn_port_addr(port_id_page,
+ v_gid_acc_pld,
+ v_lport);
+ break;
+
+ /* each RSCN page corresponds to an address group */
+ case UNF_RSCN_AREA_ADDR_GROUP:
+ case UNF_RSCN_DOMAIN_ADDR_GROUP:
+ case UNF_RSCN_FABRIC_ADDR_GROUP:
+ (void)unf_handle_rscn_group_addr(port_id_page,
+ v_gid_acc_pld,
+ v_lport);
+ break;
+
+ default:
+ break;
+ }
+
+ /* NOTE: release this RSCN_Node */
+ rscn_mgr->pfn_unf_release_rscn_node(rscn_mgr, port_id_page);
+
+ /* go to next */
+ spin_lock_irqsave(&rscn_mgr->rscn_id_list_lock, flag);
+ }
+ spin_unlock_irqrestore(&rscn_mgr->rscn_id_list_lock, flag);
+}
+
+static void unf_gid_acc_handle(struct unf_gif_acc_pld_s *v_gid_acc_pld,
+ struct unf_lport_s *v_lport)
+{
+#define UNF_NONE_DISC 0X0 /* before enter DISC */
+
+ struct unf_disc_s *disc = NULL;
+ unsigned long flag = 0;
+
+ UNF_CHECK_VALID(0x3587, UNF_TRUE, v_gid_acc_pld, return);
+ UNF_CHECK_VALID(0x3588, UNF_TRUE, v_lport, return);
+ disc = &v_lport->disc;
+
+ spin_lock_irqsave(&disc->rport_busy_pool_lock, flag);
+ switch (disc->disc_option) {
+ case UNF_INIT_DISC: // from SCR callback with INI mode
+ disc->disc_option = UNF_NONE_DISC;
+ spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag);
+ /* R_Port from Disc_list */
+ unf_handle_init_gid_acc(v_gid_acc_pld, v_lport);
+ break;
+
+ case UNF_RSCN_DISC: /* from RSCN payload parse(analysis) */
+ disc->disc_option = UNF_NONE_DISC;
+ spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag);
+
+ /* R_Port from busy_list */
+ unf_handle_rscn_gid_acc(v_gid_acc_pld, v_lport);
+ break;
+
+ default:
+ spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag);
+
+ UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
+ "[warn]Port(0x%x_0x%x)'s disc option(0x%x) is abnormal",
+ v_lport->port_id,
+ v_lport->nport_id,
+ disc->disc_option);
+ break;
+ }
+}
+
+static void unf_gid_ft_callback(void *v_lport, void *v_rport, void *v_xchg)
+{
+ struct unf_lport_s *lport = NULL;
+ struct unf_disc_s *disc = NULL;
+ struct unf_gif_acc_pld_s *gid_acc_pld = NULL;
+ struct unf_xchg_s *xchg = NULL;
+ union unf_sfs_u *sfs_ptr = NULL;
+ unsigned int cmnd_rsp_size = 0;
+ unsigned int rjt_reason = 0;
+ unsigned long flag = 0;
+
+ UNF_CHECK_VALID(0x3590, UNF_TRUE, v_lport, return);
+ UNF_CHECK_VALID(0x3591, UNF_TRUE, v_rport, return);
+ UNF_CHECK_VALID(0x3592, UNF_TRUE, v_xchg, return);
+ UNF_REFERNCE_VAR(v_rport);
+
+ lport = (struct unf_lport_s *)v_lport;
+ xchg = (struct unf_xchg_s *)v_xchg;
+ disc = &lport->disc;
+
+ sfs_ptr = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr;
+ gid_acc_pld = sfs_ptr->get_id.gid_rsp.gid_acc_pld;
+ if (!gid_acc_pld) {
+ UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
+ "[warn]LOGIN: Port(0x%x) GID_FT response payload is NULL",
+ lport->port_id);
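+ /* No ACC payload to parse: return without touching the DISC
+ * state machine; recovery is left to the DISC timer path
+ * (assumption, not stated in the original code).
+ */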
+ + return; + } + + cmnd_rsp_size = (gid_acc_pld->ctiu_pream.cmnd_rsp_size); + if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_ACCEPT) { + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + unf_disc_state_ma(lport, UNF_EVENT_DISC_SUCCESS); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + /* Process GID_FT ACC */ + unf_gid_acc_handle(gid_acc_pld, lport); + } else if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_REJECT) { + rjt_reason = (gid_acc_pld->ctiu_pream.frag_reason_exp_vend); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) GID_FT was rejected with reason code(0x%x)", + lport->port_id, rjt_reason); + + if ((rjt_reason & UNF_CTIU_RJT_EXP_MASK) == + UNF_CTIU_RJT_EXP_FC4TYPE_NO_REG) { + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + unf_disc_state_ma(lport, UNF_EVENT_DISC_SUCCESS); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, + flag); + + unf_gid_acc_handle(gid_acc_pld, lport); + } else { + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + unf_disc_state_ma(lport, UNF_EVENT_DISC_SUCCESS); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, + flag); + } + } else { + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + unf_disc_state_ma(lport, UNF_EVENT_DISC_FAILED); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + /* Do DISC recovery operation */ + unf_disc_error_recovery(lport); + } +} + +static void unf_gid_pt_callback(void *v_lport, void *v_rport, void *v_xchg) +{ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_disc_s *disc = NULL; + struct unf_gif_acc_pld_s *gid_acc_pld = NULL; + struct unf_xchg_s *xchg = NULL; + union unf_sfs_u *sfs_ptr = NULL; + unsigned int cmnd_rsp_size = 0; + unsigned int rjt_reason = 0; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3594, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3595, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3596, UNF_TRUE, v_xchg, return); + + lport = (struct unf_lport_s *)v_lport; + rport = (struct unf_rport_s *)v_rport; + disc = &lport->disc; + xchg = (struct unf_xchg_s *)v_xchg; + sfs_ptr = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + + gid_acc_pld = sfs_ptr->get_id.gid_rsp.gid_acc_pld; + if (!gid_acc_pld) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) GID_PT response payload is NULL", + lport->port_id); + + return; + } + + cmnd_rsp_size = (gid_acc_pld->ctiu_pream.cmnd_rsp_size); + if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_ACCEPT) { + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + unf_disc_state_ma(lport, UNF_EVENT_DISC_SUCCESS); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + unf_gid_acc_handle(gid_acc_pld, lport); + } else if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_REJECT) { + rjt_reason = (gid_acc_pld->ctiu_pream.frag_reason_exp_vend); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) GID_PT was rejected with reason code(0x%x)", + lport->port_id, lport->nport_id, rjt_reason); + + if (UNF_CTIU_RJT_EXP_PORTTYPE_NO_REG == + (rjt_reason & UNF_CTIU_RJT_EXP_MASK)) { + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + unf_disc_state_ma(lport, UNF_EVENT_DISC_SUCCESS); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, + flag); + + unf_gid_acc_handle(gid_acc_pld, lport); + } else { + ret = unf_send_gid_ft(lport, rport); + if (ret != RETURN_OK) { + 
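/* The GID_FT fall-back could not even be sent: mark the DISC
+ * state machine failed below and kick off error recovery.
+ */
+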
spin_lock_irqsave(&disc->rport_busy_pool_lock, + flag); + unf_disc_state_ma(lport, UNF_EVENT_DISC_FAILED); + spin_unlock_irqrestore( + &disc->rport_busy_pool_lock, flag); + + /* Do DISC recovery */ + unf_disc_error_recovery(lport); + } + } + } else { + spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); + unf_disc_state_ma(lport, UNF_EVENT_DISC_FAILED); + spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); + + /* Do DISC recovery */ + unf_disc_error_recovery(lport); + } +} + +void unf_rcv_gnn_id_rsp_unknown(struct unf_lport_s *v_lport, + struct unf_rport_s *v_sns_port, + unsigned int v_nport_id) +{ + /* Send GFF_ID */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *sns_port = v_sns_port; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3606, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3607, UNF_TRUE, v_sns_port, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]LOGIN: Port(0x%x_0x%x) Rportid(0x%x) GNN_ID response is unknown. Sending GFF_ID", + lport->port_id, lport->nport_id, v_nport_id); + + ret = unf_get_and_post_disc_event(lport, sns_port, v_nport_id, + UNF_DISC_GET_FEATURE); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", + lport->port_id, UNF_DISC_GET_FEATURE, v_nport_id); + + /* NOTE: go to next stage */ + unf_rcv_gff_id_rsp_unknown(lport, v_nport_id); // send PLOGI + } +} + +void unf_rcv_gff_id_rsp_unknown(struct unf_lport_s *v_lport, + unsigned int v_nport_id) +{ + /* Send PLOGI */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = NULL; + unsigned long flag = 0; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3624, UNF_TRUE, v_lport, return); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) send GFF_ID for RPort(0x%x) but response is unknown", + lport->port_id, v_nport_id); + + /* Get (Safe) R_Port & Set State */ + rport = unf_get_rport_by_nport_id(lport, v_nport_id); + if (rport) + rport = unf_find_rport(lport, v_nport_id, rport->port_name); + + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) can't get RPort by NPort ID(0x%x), allocate new RPort", + lport->port_id, lport->nport_id, v_nport_id); + + rport = unf_rport_get_free_and_init(lport, UNF_PORT_TYPE_FC, + v_nport_id); + UNF_CHECK_VALID(0x3619, UNF_TRUE, NULL != rport, return); + + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = v_nport_id; + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + } + + rport = unf_get_safe_rport(lport, rport, UNF_RPORT_REUSE_ONLY, + v_nport_id); + UNF_CHECK_VALID(0x3625, UNF_TRUE, rport, return); + + /* Update R_Port state: PLOGI_WAIT */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = v_nport_id; + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Start to send PLOGI */ + ret = unf_send_plogi(lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x) can not send PLOGI for RPort(0x%x), enter recovery", + lport->port_id, v_nport_id); + + unf_rport_error_recovery(rport); + } +} + +static void unf_lport_update_nport_id(struct unf_lport_s *v_lport, + unsigned int v_nport_id) +{ + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3646, UNF_TRUE, v_lport, return); + + 
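/* The local N_Port_ID was assigned by the fabric in the FLOGI/FDISC
+ * ACC (callers pass the S_ID saved in the exchange); publish it
+ * under the L_Port state lock.
+ */
+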
spin_lock_irqsave(&v_lport->lport_state_lock, flag); + v_lport->nport_id = v_nport_id; + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); +} + +static void unf_lport_update_time_params( + struct unf_lport_s *v_lport, + struct unf_flogi_payload_s *v_flogi_payload) +{ + unsigned long flag = 0; + unsigned int ed_tov = 0; + unsigned int ra_tov = 0; + + UNF_CHECK_VALID(0x3647, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3648, UNF_TRUE, v_flogi_payload, return); + + ed_tov = v_flogi_payload->fabric_parms.co_parms.e_d_tov; + ra_tov = v_flogi_payload->fabric_parms.co_parms.r_a_tov; + + spin_lock_irqsave(&v_lport->lport_state_lock, flag); + + /* FC-FS-3: 21.3.4, 21.3.5 */ + if ((v_lport->en_act_topo == UNF_ACT_TOP_P2P_FABRIC) || + (v_lport->en_act_topo == UNF_ACT_TOP_PUBLIC_LOOP)) { + v_lport->ed_tov = ed_tov; + v_lport->ra_tov = ra_tov; + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_EQUIP_ATT, + UNF_MAJOR, + "[info]Port(0x%x_0x%x) with topo(0x%x) no need to save time parameters", + v_lport->port_id, v_lport->nport_id, + v_lport->en_act_topo); + } + + spin_unlock_irqrestore(&v_lport->lport_state_lock, flag); +} + +static void unf_fdisc_callback(void *v_lport, void *v_rport, void *v_xchg) +{ + /* Register to Name Server or Do recovery */ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_xchg_s *xchg = NULL; + struct unf_flogi_payload_s *fdisc_pld = NULL; + unsigned long flag = 0; + unsigned int cmd = 0; + + lport = (struct unf_lport_s *)v_lport; + rport = (struct unf_rport_s *)v_rport; + xchg = (struct unf_xchg_s *)v_xchg; + UNF_CHECK_VALID(0x3640, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3641, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3642, UNF_TRUE, v_xchg, return); + UNF_CHECK_VALID(0x3643, UNF_TRUE, + xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr, + return); + fdisc_pld = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->fdisc_acc.fdisc_payload; + if (xchg->byte_orders & UNF_BIT_2) + unf_big_end_to_cpu((unsigned char *)fdisc_pld, + sizeof(struct unf_flogi_payload_s)); + + cmd = fdisc_pld->cmnd; + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: FDISC response is (0x%x). Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", + cmd, lport->port_id, rport->nport_id, xchg->ox_id); + rport = unf_get_rport_by_nport_id(lport, UNF_FC_FID_FLOGI); + rport = unf_get_safe_rport(lport, rport, UNF_RPORT_REUSE_ONLY, + UNF_FC_FID_FLOGI); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) has no Rport", lport->port_id); + return; + } + + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = UNF_FC_FID_FLOGI; + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + if ((cmd & UNF_ELS_CMND_HIGH_MASK) == UNF_ELS_CMND_ACC) { + /* Case for ACC */ + spin_lock_irqsave(&lport->lport_state_lock, flag); + if (lport->en_states != UNF_LPORT_ST_FLOGI_WAIT) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x_0x%x) receive Flogi/Fdisc ACC in state(0x%x)", + lport->port_id, lport->nport_id, + lport->en_states); + + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + return; + } + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + unf_lport_update_nport_id(lport, xchg->sid); + unf_lport_update_time_params(lport, fdisc_pld); + + unf_register_to_switch(lport); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: FDISC response is (0x%x). 
Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)",
+ cmd, lport->port_id, rport->nport_id,
+ xchg->ox_id);
+
+ /* Case for RJT: Do L_Port recovery */
+ unf_lport_error_recovery(lport);
+ }
+}
+
+static void unf_rcv_flogi_acc(struct unf_lport_s *v_lport,
+ struct unf_rport_s *v_rport,
+ struct unf_flogi_payload_s *v_flogi_pld,
+ unsigned int v_nport_id,
+ struct unf_xchg_s *v_xchg)
+{
+ /* PLOGI to Name server or remote port */
+ struct unf_lport_s *lport = v_lport;
+ struct unf_rport_s *rport = v_rport;
+ struct unf_flogi_payload_s *flogi_pld = v_flogi_pld;
+ struct unf_fabric_parms_s *fabric_params = NULL;
+ unsigned long long port_name = 0;
+ unsigned long long node_name = 0;
+ unsigned long flag = 0;
+
+ UNF_CHECK_VALID(0x3649, UNF_TRUE, v_lport, return);
+ UNF_CHECK_VALID(0x3650, UNF_TRUE, v_rport, return);
+ UNF_CHECK_VALID(0x3651, UNF_TRUE, v_flogi_pld, return);
+
+ /* Check L_Port state: FLOGI_WAIT */
+ spin_lock_irqsave(&lport->lport_state_lock, flag);
+ if (lport->en_states != UNF_LPORT_ST_FLOGI_WAIT) {
+ UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_WARN,
+ "[info]Port(0x%x_0x%x) receive FLOGI ACC with state(0x%x)",
+ lport->port_id, lport->nport_id, lport->en_states);
+
+ spin_unlock_irqrestore(&lport->lport_state_lock, flag);
+ return;
+ }
+ spin_unlock_irqrestore(&lport->lport_state_lock, flag);
+
+ fabric_params = &flogi_pld->fabric_parms;
+ node_name = (unsigned long long)
+ (((unsigned long long)(fabric_params->high_node_name) << 32) |
+ ((unsigned long long)(fabric_params->low_node_name)));
+ port_name = (unsigned long long)
+ (((unsigned long long)(fabric_params->high_port_name) << 32) |
+ ((unsigned long long)(fabric_params->low_port_name)));
+
+ /* FLOGI ACC payload class 3 service priority value */
+ lport->b_priority = UNF_PRIORITY_DISABLE;
+
+ /* Save Flogi parameters */
+ unf_save_fabric_params(lport, rport, fabric_params);
+
+ if (UNF_CHECK_NPORT_FPORT_BIT(flogi_pld) == UNF_N_PORT) {
+ /* P2P Mode */
+ unf_lport_update_topo(lport, UNF_ACT_TOP_P2P_DIRECT);
+ unf_login_with_rport_in_n2n(lport, port_name, node_name);
+ } else {
+ /* for: UNF_ACT_TOP_PUBLIC_LOOP
+ * /UNF_ACT_TOP_P2P_FABRIC/UNF_TOP_P2P_MASK
+ */
+ if (lport->en_act_topo != UNF_ACT_TOP_PUBLIC_LOOP)
+ unf_lport_update_topo(lport, UNF_ACT_TOP_P2P_FABRIC);
+
+ unf_lport_update_nport_id(lport, v_nport_id);
+ unf_lport_update_time_params(lport, flogi_pld);
+
+ /* Save process both for Public loop & Fabric */
+ unf_register_to_switch(lport);
+ }
+}
+
+static void unf_flogi_acc_com_process(struct unf_xchg_s *v_xchg)
+{
+ /* Maybe within interrupt or thread context */
+ struct unf_lport_s *lport = NULL;
+ struct unf_rport_s *rport = NULL;
+ struct unf_flogi_payload_s *flogi_pld = NULL;
+ unsigned int nport_id = 0;
+ unsigned int cmnd = 0;
+ unsigned long flags = 0;
+ struct unf_xchg_s *xchg = v_xchg;
+
+ UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, xchg, return);
+ UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, xchg->lport, return);
+
+ lport = xchg->lport;
+ rport = xchg->rport;
+ flogi_pld = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->flogi_acc.flogi_payload;
+ cmnd = flogi_pld->cmnd;
+
+ /* Get N_Port_ID & R_Port */
+ /* Others: 0xFFFFFE */
+ rport = unf_get_rport_by_nport_id(lport, UNF_FC_FID_FLOGI);
+ nport_id = UNF_FC_FID_FLOGI;
+
+ /* Get Safe R_Port: reuse only */
+ rport = unf_get_safe_rport(lport, rport, UNF_RPORT_REUSE_ONLY,
+ nport_id);
+ if (!rport) {
+ UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN,
+ "[warn]Port(0x%x) cannot allocate new RPort",
+ lport->port_id);
+
+ return;
+
} + + /* Update R_Port N_Port_ID */ + spin_lock_irqsave(&rport->rport_state_lock, flags); + /* Others: 0xFFFFFE */ + rport->nport_id = UNF_FC_FID_FLOGI; + spin_unlock_irqrestore(&rport->rport_state_lock, flags); + + /* Process FLOGI ACC or RJT */ + if ((cmnd & UNF_ELS_CMND_HIGH_MASK) == UNF_ELS_CMND_ACC) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: FLOGI response is(0x%x). Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", + cmnd, lport->port_id, rport->nport_id, + xchg->ox_id); + + /* Case for ACC */ + unf_rcv_flogi_acc(lport, rport, flogi_pld, xchg->sid, xchg); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: FLOGI response is(0x%x). Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", + cmnd, lport->port_id, rport->nport_id, + xchg->ox_id); + + /* Case for RJT: do L_Port error recovery */ + unf_lport_error_recovery(lport); + } +} + +static int unf_rcv_flogi_acc_async_callback(void *v_arg_in, + void *v_arg_out) +{ + struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_arg_in; + + UNF_CHECK_VALID(0x2267, UNF_TRUE, xchg, return UNF_RETURN_ERROR); + + unf_flogi_acc_com_process(xchg); + + unf_xchg_ref_dec(xchg, SFS_RESPONSE); + return RETURN_OK; +} + +static void unf_flogi_callback(void *v_lport, void *v_rport, void *v_xchg) +{ + /* Callback function for FLOGI ACC or RJT */ + struct unf_lport_s *lport = (struct unf_lport_s *)v_lport; + struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_xchg; + struct unf_flogi_payload_s *flogi_pld = NULL; + int bbscn_enabled = UNF_FALSE; + enum unf_act_topo_e act_topo = UNF_ACT_TOP_UNKNOWN; + int switch_2_thread = UNF_FALSE; + + UNF_CHECK_VALID(0x3652, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3653, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3654, UNF_TRUE, v_xchg, return); + UNF_CHECK_VALID(0x3655, UNF_TRUE, + xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr, + return); + + xchg->lport = v_lport; + flogi_pld = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->flogi_acc.flogi_payload; + + if (xchg->byte_orders & UNF_BIT_2) + unf_big_end_to_cpu((unsigned char *)flogi_pld, + sizeof(struct unf_flogi_payload_s)); + + if ((lport->en_act_topo != UNF_ACT_TOP_PUBLIC_LOOP) && + (UNF_CHECK_NPORT_FPORT_BIT(flogi_pld) == UNF_F_PORT)) + /* Get Top Mode (P2P_F) --->>> used for BBSCN */ + act_topo = UNF_ACT_TOP_P2P_FABRIC; + + bbscn_enabled = unf_check_bbscn_is_enabled( + (unsigned char) + lport->low_level_func.lport_cfg_items.bb_scn, + (unsigned char) + UNF_GET_BB_SC_N_FROM_PARAMS(&flogi_pld->fabric_parms)); + if ((act_topo == UNF_ACT_TOP_P2P_FABRIC) && + (bbscn_enabled == UNF_TRUE)) { + /* BBSCN Enable or not --->>> used for Context change */ + lport->b_bbscn_support = UNF_TRUE; + switch_2_thread = UNF_TRUE; + } + + if ((switch_2_thread == UNF_TRUE) && (lport->root_lport == lport)) { + /* Wait for LR done sync: for Root Port */ + (void)unf_irq_process_switch_2_thread( + lport, xchg, + unf_rcv_flogi_acc_async_callback); + } else { + /* Process FLOGI response directly */ + unf_flogi_acc_com_process(xchg); + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_ALL, + "[info]Port(0x%x) process FLOGI response: switch(%d) to thread done", + lport->port_id, switch_2_thread); +} + +struct unf_rport_s *unf_find_rport(struct unf_lport_s *v_lport, + unsigned int v_rport_nport_id, + unsigned long long v_port_name) +{ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = NULL; + + UNF_CHECK_VALID(0x3658, UNF_TRUE, v_lport, return NULL); + + if (v_rport_nport_id >= UNF_FC_FID_DOM_MGR) // 
N_Port_ID <---> SID + /* R_Port is Fabric: by N_Port_ID */ + rport = unf_get_rport_by_nport_id(lport, v_rport_nport_id); + else + /* Others: by WWPN & N_Port_ID */ + rport = unf_find_valid_rport(lport, v_port_name, + v_rport_nport_id); + + return rport; +} + +static void unf_rcv_plogi_acc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_lgn_parms_s *v_login_parms) +{ + /* PLOGI ACC: PRLI(non fabric) or RFT_ID(fabric) */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = v_rport; + struct unf_lgn_parms_s *login_parms = v_login_parms; + unsigned long long node_name = 0; + unsigned long long port_name = 0; + unsigned long flag = 0; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3659, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3660, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3661, UNF_TRUE, v_login_parms, return); + + node_name = (unsigned long long) + (((unsigned long long)(login_parms->high_node_name) << 32) | + ((unsigned long long)(login_parms->low_node_name))); + port_name = (unsigned long long) + (((unsigned long long)(login_parms->high_port_name) << 32) | + ((unsigned long long)(login_parms->low_port_name))); + + /* ACC & Case for: R_Port is fabric (RFT_ID) */ + if (rport->nport_id >= UNF_FC_FID_DOM_MGR) { + /* Check L_Port state */ + spin_lock_irqsave(&lport->lport_state_lock, flag); + if (lport->en_states != UNF_LPORT_ST_PLOGI_WAIT) { + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) receive PLOGI ACC with error state(0x%x)", + v_lport->port_id, lport->en_states); + + return; + } + /* PLOGI_WAIT --> RFT_ID_WAIT */ + unf_lport_stat_ma(lport, UNF_EVENT_LPORT_REMOTE_ACC); + spin_unlock_irqrestore(&lport->lport_state_lock, flag); + + /* PLOGI parameters save */ + unf_save_plogi_params(lport, rport, login_parms, ELS_ACC); + + /* Update R_Port WWPN & WWNN */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->node_name = node_name; + rport->port_name = port_name; + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Start to Send RFT_ID */ + ret = unf_send_rft_id(lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]LOGIN: Port(0x%x) send RFT_ID failed", + v_lport->port_id); + + unf_lport_error_recovery(lport); + } + } else { + /* ACC & Case for: R_Port is not fabric */ + if ((rport->options == UNF_PORT_MODE_UNKNOWN) && + (rport->port_name != INVALID_WWPN)) + rport->options = unf_get_port_feature(port_name); + /* Set Port Feature with BOTH: cancel */ + + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->node_name = node_name; + rport->port_name = port_name; + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_INFO, + "[info]LOGIN: Port(0x%x)<---LS_ACC(DID:0x%x SID:0x%x) for PLOGI ACC with RPort state(0x%x) NodeName(0x%llx) E_D_TOV(%d)", + lport->port_id, lport->nport_id, + rport->nport_id, rport->rp_state, + rport->node_name, rport->ed_tov); + + if ((lport->en_act_topo == UNF_ACT_TOP_PRIVATE_LOOP) && + ((rport->rp_state == UNF_RPORT_ST_PRLI_WAIT) || + (rport->rp_state == UNF_RPORT_ST_READY))) { + /* Do nothing, return directly */ + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + return; + } + + /* PRLI_WAIT */ + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PRLI); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* PLOGI parameters save */ + unf_save_plogi_params(lport, rport, login_parms, ELS_ACC); + + /* + * 
Need Delay to Send PRLI or not + * Used for: L_Port with INI mode & R_Port is not Fabric + */ + unf_check_rport_need_delay_prli(lport, rport, + rport->options); + + /* Do not care: just used when L_Port is TGT-only mode + * or R_Port is INI-only mode + */ + unf_schedule_open_work(lport, rport); + } +} + +static void unf_plogi_acc_com_process(struct unf_xchg_s *v_xchg) +{ + struct unf_lport_s *lport = NULL; + struct unf_rport_s *rport = NULL; + struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_xchg; + struct unf_plogi_payload_s *plogi_pld = NULL; + struct unf_lgn_parms_s *login_parms = NULL; + unsigned long flag = 0; + unsigned long long port_name = 0; + unsigned int rport_nport_id = 0; + unsigned int cmnd = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, xchg, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, xchg->lport, return); + UNF_CHECK_VALID(INVALID_VALUE32, UNF_TRUE, xchg->rport, return); + + lport = xchg->lport; + rport = xchg->rport; + rport_nport_id = rport->nport_id; + plogi_pld = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->plogi_acc.payload; + login_parms = &plogi_pld->parms; + cmnd = (plogi_pld->cmnd); + + if ((cmnd & UNF_ELS_CMND_HIGH_MASK) == UNF_ELS_CMND_ACC) { + /* Case for PLOGI ACC: Go to next stage */ + port_name = (unsigned long long) + (((unsigned long long)(login_parms->high_port_name) << 32) | + ((unsigned long long)(login_parms->low_port_name))); + + /* Get (new) R_Port: 0xfffffc has the same WWN as 0xfffcxx */ + rport = unf_find_rport(lport, rport_nport_id, port_name); + rport = unf_get_safe_rport(lport, rport, UNF_RPORT_REUSE_ONLY, + rport_nport_id); + if (unlikely(!rport)) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x_0x%x) alloc new RPort with wwpn(0x%llx) failed", + lport->port_id, lport->nport_id, + port_name); + return; + } + + /* PLOGI parameters check */ + ret = unf_check_plogi_params(lport, rport, login_parms); + if (ret != RETURN_OK) + return; + + /* Update R_Port state */ + spin_lock_irqsave(&rport->rport_state_lock, flag); + rport->nport_id = rport_nport_id; + /* --->>> PLOGI_WAIT */ + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Start to process PLOGI ACC */ + unf_rcv_plogi_acc(lport, rport, login_parms); + } else { + /* Case for PLOGI RJT: L_Port or R_Port recovery */ + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]LOGIN: Port(0x%x)<---RPort(0x%p) with LS_RJT(DID:0x%x SID:0x%x) for PLOGI", + lport->port_id, rport, lport->nport_id, + rport->nport_id); + + if (rport->nport_id >= UNF_FC_FID_DOM_MGR) + /* for Name server */ + unf_lport_error_recovery(lport); + else + unf_rport_error_recovery(rport); + } + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]LOGIN: PLOGI response(0x%x). Port(0x%x_0x%x)<---RPort(0x%x_0x%p) wwpn(0x%llx) OX_ID(0x%x)", + cmnd, lport->port_id, lport->nport_id, rport->nport_id, + rport, port_name, xchg->ox_id); +} + +static int unf_rcv_plogi_acc_async_callback(void *v_argc_in, void *v_argc_out) +{ + struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_argc_in; + + UNF_CHECK_VALID(0x2267, UNF_TRUE, xchg, return UNF_RETURN_ERROR); + + unf_plogi_acc_com_process(xchg); + + unf_xchg_ref_dec(xchg, SFS_RESPONSE); + + return RETURN_OK; +} + +static void unf_plogi_callback(void *v_lport, void *v_rport, void *v_xchg) +{ + struct unf_lport_s *lport = (struct unf_lport_s *)v_lport; + struct unf_xchg_s *xchg = (struct unf_xchg_s *)v_xchg; + struct unf_plogi_payload_s *plogi_pld = NULL; + struct unf_lgn_parms_s *login_parms = NULL; + int bbscn_enabled = UNF_FALSE; + int switch_2_thread = UNF_FALSE; + + UNF_CHECK_VALID(0x3662, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3663, UNF_TRUE, v_rport, return); + UNF_CHECK_VALID(0x3664, UNF_TRUE, v_xchg, return); + UNF_CHECK_VALID(0x3665, UNF_TRUE, + xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr, + return); + + plogi_pld = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->plogi_acc.payload; + login_parms = &plogi_pld->parms; + xchg->lport = v_lport; + + if (xchg->byte_orders & UNF_BIT_2) + unf_big_end_to_cpu((unsigned char *)plogi_pld, + sizeof(struct unf_plogi_payload_s)); + + bbscn_enabled = unf_check_bbscn_is_enabled( + (unsigned char)lport->low_level_func.lport_cfg_items.bb_scn, + (unsigned char)UNF_GET_BB_SC_N_FROM_PARAMS(login_parms)); + if ((bbscn_enabled == UNF_TRUE) && + (lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT)) { + switch_2_thread = UNF_TRUE; + lport->b_bbscn_support = UNF_TRUE; + } + + if ((switch_2_thread == UNF_TRUE) && (lport->root_lport == lport)) { + /* Wait for LR done sync: just for ROOT Port */ + (void)unf_irq_process_switch_2_thread( + lport, xchg, + unf_rcv_plogi_acc_async_callback); + } else { + unf_plogi_acc_com_process(xchg); + } +} + +static void unf_process_logo_in_pri_loop(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* Send PLOGI or LOGO */ + struct unf_rport_s *rport = v_rport; + unsigned long flag = 0; + + UNF_CHECK_VALID(0x3666, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3667, UNF_TRUE, v_rport, return); + + spin_lock_irqsave(&rport->rport_state_lock, flag); + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); /* PLOGI WAIT */ + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + /* Private Loop with INI mode, Avoid COM Mode problem */ + unf_rport_delay_login(rport); +} + +static void unf_process_logo_in_n2n(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* Send PLOGI or LOGO */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = v_rport; + unsigned long flag = 0; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3668, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3669, UNF_TRUE, v_rport, return); + + spin_lock_irqsave(&rport->rport_state_lock, flag); + + unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); // PLOGI WAIT + spin_unlock_irqrestore(&rport->rport_state_lock, flag); + + if (lport->port_name > rport->port_name) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, + UNF_MAJOR, + "[info]Port(0x%x)'s WWN(0x%llx) is larger than(0x%llx), should be master", + lport->port_id, lport->port_name, + rport->port_name); + + ret = unf_send_plogi(lport, rport); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, + UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]LOGIN: Port(0x%x) send PLOGI 
failed, enter recovery", + v_lport->port_id); + + unf_rport_error_recovery(rport); + } + } else { + unf_rport_enter_logo(lport, rport); + } +} + +void unf_process_logo_in_fabric(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* Send GFF_ID or LOGO */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = v_rport; + struct unf_rport_s *sns_port = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3670, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3671, UNF_TRUE, v_rport, return); + + /* L_Port with INI Mode: Send GFF_ID */ + sns_port = unf_get_rport_by_nport_id(lport, UNF_FC_FID_DIR_SERV); + if (!sns_port) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, + UNF_WARN, + "[warn]Port(0x%x) can't find fabric port", + lport->port_id); + return; + } + + ret = unf_get_and_post_disc_event(v_lport, sns_port, rport->nport_id, + UNF_DISC_GET_FEATURE); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", + lport->port_id, UNF_DISC_GET_FEATURE, + rport->nport_id); + + unf_rcv_gff_id_rsp_unknown(lport, rport->nport_id); + } +} + +static void unf_process_rport_after_logo(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport) +{ + /* + * 1. LOGO handler + * 2. RPLO handler + * 3. LOGO_CALL_BACK (send LOGO ACC) handler + */ + struct unf_lport_s *lport = v_lport; + struct unf_rport_s *rport = v_rport; + + UNF_CHECK_VALID(0x3672, UNF_TRUE, v_lport, return); + UNF_CHECK_VALID(0x3673, UNF_TRUE, v_rport, return); + + if (rport->nport_id < UNF_FC_FID_DOM_MGR) { + /* R_Port is not fabric port (retry LOGIN or LOGO) */ + if (lport->en_act_topo == UNF_ACT_TOP_PRIVATE_LOOP) { + /* Private Loop: PLOGI or LOGO */ + unf_process_logo_in_pri_loop(lport, rport); + } else if (lport->en_act_topo == UNF_ACT_TOP_P2P_DIRECT) { + /* Point to Point: LOGIN or LOGO */ + unf_process_logo_in_n2n(lport, rport); + } else { + /* Fabric or Public Loop: GFF_ID or LOGO */ + unf_process_logo_in_fabric(lport, rport); + } + } else { + /* Rport is fabric port: link down now */ + unf_rport_linkdown(lport, rport); + } +} + +static unsigned int unf_rcv_bls_req_done(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_pkg) +{ + /* + * About I/O resource: + * 1. normal: Release I/O resource during RRQ processer + * 2. exception: Release I/O resource immediately + */ + struct unf_xchg_s *xchg = NULL; + unsigned short hot_pool_tag = 0; + unsigned long flags = 0; + unsigned long time_ms = 0; + unsigned int ret = RETURN_OK; + struct unf_lport_s *lport = NULL; + + UNF_CHECK_VALID(0x3723, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3724, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR); + lport = v_lport; + + /* 1. BLS Request Response: Hot Pool Tag --->>> OX_ID */ + hot_pool_tag = + (unsigned short)v_pkg->private[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]; + xchg = (struct unf_xchg_s *)unf_cm_lookup_xchg_by_tag( + (void *)lport, hot_pool_tag); + if (!xchg) { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) can't find exchange by tag(0x%x) when receiving ABTS response", + lport->port_id, hot_pool_tag); + + /* return directly */ + return UNF_RETURN_ERROR; + } + + /* Consistency check */ + UNF_CHECK_ALLOCTIME_VALID(v_lport, hot_pool_tag, xchg, + v_pkg->private[PKG_PRIVATE_XCHG_ALLOC_TIME], + xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]); + + /* 2. 
Increase ref_cnt for exchange protecting */ + + ret = unf_xchg_ref_inc(xchg, TGT_ABTS_DONE); /* hold */ + UNF_CHECK_VALID(0x3725, UNF_TRUE, (ret == RETURN_OK), + return UNF_RETURN_ERROR); + + /* 3. Exchag I/O State Set & Check: reused */ + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + xchg->io_state |= INI_IO_STATE_DONE; /* I/O Done */ + xchg->abts_state |= ABTS_RESPONSE_RECEIVED; + if (!(xchg->io_state & INI_IO_STATE_UPABORT)) { + /* NOTE: I/O exchange has been released and used again */ + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) SID(0x%x) exch(0x%p) (0x%x:0x%x:0x%x:0x%x) state(0x%x) is abnormal with cnt(0x%x)", + lport->port_id, lport->nport_id, + xchg->sid, xchg, xchg->hot_pool_tag, + xchg->ox_id, xchg->rx_id, xchg->oid, + xchg->io_state, + atomic_read(&xchg->ref_cnt)); + + /* return directly */ + /* cancel ref & do nothing */ + unf_xchg_ref_dec(xchg, TGT_ABTS_DONE); + return UNF_RETURN_ERROR; + } + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + /* 4. Exchange Timer check, cancel if necessary */ + lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer((void *)xchg); + + /* + * 5. Exchage I/O Status check: Succ-> Add RRQ Timer + * ***** pkg->status --- to --->>> scsi_cmnd->result ***** + * + * FAILED: ERR_Code or X_ID is err, or BA_RSP type is err + */ + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + if (v_pkg->status == UNF_IO_SUCCESS) { + /* Succeed: PKG status -->> EXCH status -->> scsi status */ + UNF_SET_SCSI_CMND_RESULT(xchg, UNF_IO_SUCCESS); + xchg->io_state |= INI_IO_STATE_WAIT_RRQ; + xchg->rx_id = UNF_GET_RXID(v_pkg); + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + /* Add RRQ timer */ + time_ms = (unsigned long)(lport->ra_tov); + lport->xchg_mgr_temp.pfn_unf_xchg_add_timer( + (void *)xchg, + time_ms, + UNF_TIMER_TYPE_INI_RRQ); + } else { + /* Failed: PKG status -->> EXCH status -->> scsi status */ + UNF_SET_SCSI_CMND_RESULT(xchg, UNF_IO_FAILED); + if (MARKER_STS_RECEIVED & xchg->abts_state) { + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + /* NOTE: release I/O resource immediately */ + unf_cm_free_xchg(lport, xchg); + } else { + UNF_TRACE(UNF_EVTLOG_IO_WARN, UNF_LOG_IO_ATT, UNF_WARN, + "[warn]Port(0x%x) exch(0x%p) OX_RX(0x%x:0x%x) IOstate(0x%x) ABTSstate(0x%x) receive response abnormal ref(0x%x)", + lport->port_id, xchg, xchg->ox_id, + xchg->rx_id, + xchg->io_state, xchg->abts_state, + atomic_read(&xchg->ref_cnt)); + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + } + } + + /* + * 6. If abts response arrived before + * marker sts received just wake up abts marker sema + */ + spin_lock_irqsave(&xchg->xchg_state_lock, flags); + if (!(xchg->abts_state & MARKER_STS_RECEIVED)) { + xchg->ucode_abts_state = v_pkg->status; + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + + /* NOTE: wake up semaphore */ + up(&xchg->task_sema); + } else { + spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); + } + + /* 7. 
dec exch ref_cnt */ + unf_xchg_ref_dec(xchg, TGT_ABTS_DONE); + return ret; +} + +static unsigned int unf_rcv_abort_ini_io_done(struct unf_lport_s *v_lport, + struct unf_frame_pkg_s *v_pkg) +{ + /* INI mode: do not care */ + struct unf_xchg_s *io_xchg = NULL; + unsigned short io_pool_tag = 0; + unsigned int ret = RETURN_OK; + + UNF_CHECK_VALID(0x3735, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3736, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR); + + io_pool_tag = UNF_GET_IO_XCHG_TAG(v_pkg); + io_xchg = (struct unf_xchg_s *)unf_cm_lookup_xchg_by_tag( + (void *)v_lport, + io_pool_tag); + if (io_xchg) { + UNF_CHECK_ALLOCTIME_VALID( + v_lport, io_pool_tag, io_xchg, + v_pkg->private[PKG_PRIVATE_XCHG_ALLOC_TIME], + io_xchg->private[PKG_PRIVATE_XCHG_ALLOC_TIME]); + + /* 1. Timer release */ + v_lport->xchg_mgr_temp.pfn_unf_xchg_cancel_timer( + (void *)io_xchg); + + UNF_TRACE(UNF_EVTLOG_IO_INFO, UNF_LOG_IO_ATT, UNF_MAJOR, + "[info]Port(0x%x) abort INI IO with status(0x%x) exchange(0x%p) tag(0x%x)", + v_lport->port_id, v_pkg->status, + io_xchg, io_pool_tag); + + /* 2. Free I/O Exchange context */ + unf_cm_free_xchg((void *)v_lport, (void *)io_xchg); + } + + return ret; +} + +unsigned int unf_receive_bls_pkg(void *v_lport, struct unf_frame_pkg_s *v_pkg) +{ + struct unf_lport_s *lport = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + lport = (struct unf_lport_s *)v_lport; + UNF_CHECK_VALID(0x3730, UNF_TRUE, lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3731, UNF_TRUE, v_pkg, return UNF_RETURN_ERROR); + + if (v_pkg->type == UNF_PKG_BLS_REQ_DONE) { + /* INI: RCVD BLS Req Done */ + ret = unf_rcv_bls_req_done(v_lport, v_pkg); + } else if (v_pkg->type == UNF_PKG_INI_IO) { + /* INI: Abort Done (do not care) */ + ret = unf_rcv_abort_ini_io_done(v_lport, v_pkg); + } else { + UNF_TRACE(UNF_EVTLOG_DRIVER_ERR, UNF_LOG_LOGIN_ATT, UNF_ERR, + "[err]Port(0x%x) received BLS packet with unknown type(0x%x)", + lport->port_id, v_pkg->type); + + return UNF_RETURN_ERROR; + } + + UNF_REFERNCE_VAR(lport); + + return ret; +} + +static void unf_fill_rls_acc_pld(struct unf_rls_acc_s *v_rls_acc, + struct unf_lport_s *v_lport) +{ + struct unf_rls_acc_payload_s *rls_acc_pld = NULL; + + rls_acc_pld = &v_rls_acc->rls; + rls_acc_pld->cmnd = UNF_ELS_CMND_ACC; + + rls_acc_pld->link_failure_count = + v_lport->err_code_sum.link_fail_count; + rls_acc_pld->loss_of_sync_count = + v_lport->err_code_sum.loss_of_sync_count; + rls_acc_pld->loss_of_signal_count = + v_lport->err_code_sum.loss_of_signal_count; + rls_acc_pld->primitive_seq_count = 0; + rls_acc_pld->invalid_trans_word_count = 0; + rls_acc_pld->invalid_crc_count = + v_lport->err_code_sum.bad_crc_count; +} + +static unsigned int unf_send_rls_acc(struct unf_lport_s *v_lport, + unsigned int v_did, + struct unf_xchg_s *v_xchg) +{ + struct unf_rls_acc_s *rls_acc = NULL; + union unf_sfs_u *fc_entry = NULL; + unsigned int ret = UNF_RETURN_ERROR; + unsigned short ox_id = 0; + unsigned short rx_id = 0; + struct unf_frame_pkg_s pkg; + + memset(&pkg, 0, sizeof(struct unf_frame_pkg_s)); + v_xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_RLS); + v_xchg->did = v_did; + v_xchg->sid = v_lport->nport_id; + v_xchg->oid = v_xchg->sid; + v_xchg->lport = v_lport; + + v_xchg->pfn_callback = NULL; + v_xchg->pfn_ob_callback = NULL; + + unf_fill_package(&pkg, v_xchg, v_xchg->rport); + + fc_entry = v_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; + if (!fc_entry) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", + v_lport->port_id, v_xchg->hot_pool_tag); + return UNF_RETURN_ERROR; + } + + rls_acc = &fc_entry->rls_acc; + unf_fill_rls_acc_pld(rls_acc, v_lport); + ox_id = v_xchg->ox_id; + rx_id = v_xchg->rx_id; + + ret = unf_els_cmnd_send(v_lport, &pkg, v_xchg); + + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_MAJOR, + "[info]Port(0x%x) send RLS ACC %s to RPort(0x%x) with OX_ID(0x%x) RX_ID(0x%x).", + v_lport->port_id, (ret != RETURN_OK) ? "failed" : "succeed", + v_did, ox_id, rx_id); + + UNF_REFERNCE_VAR(ox_id); + UNF_REFERNCE_VAR(rx_id); + return ret; +} + +static unsigned int unf_rls_handler(struct unf_lport_s *v_lport, + unsigned int v_sid, + struct unf_xchg_s *v_xchg) +{ + struct unf_rport_s *rport = NULL; + unsigned int ret = UNF_RETURN_ERROR; + + UNF_CHECK_VALID(0x3483, UNF_TRUE, v_lport, return UNF_RETURN_ERROR); + UNF_CHECK_VALID(0x3484, UNF_TRUE, v_xchg, return UNF_RETURN_ERROR); + + UNF_SERVICE_COLLECT(v_lport->link_service_info, UNF_SERVICE_ITEM_RLS); + + rport = unf_get_rport_by_nport_id(v_lport, v_sid); + if (!rport) { + UNF_TRACE(UNF_EVTLOG_DRIVER_INFO, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x_0x%x) can't find RPort by sid(0x%x) OX_ID(0x%x)", + v_lport->port_id, v_lport->nport_id, v_sid, + v_xchg->ox_id); + unf_cm_free_xchg(v_lport, v_xchg); + return ret; + } + v_xchg->rport = rport; + + ret = unf_send_rls_acc(v_lport, v_sid, v_xchg); + if (ret != RETURN_OK) { + UNF_TRACE(UNF_EVTLOG_DRIVER_WARN, UNF_LOG_LOGIN_ATT, UNF_WARN, + "[warn]Port(0x%x) send RLS ACC failed", + v_lport->port_id); + unf_cm_free_xchg(v_lport, v_xchg); + } + + return ret; +} + diff --git a/drivers/scsi/huawei/hifc/unf_service.h b/drivers/scsi/huawei/hifc/unf_service.h new file mode 100644 index 000000000000..868723128575 --- /dev/null +++ b/drivers/scsi/huawei/hifc/unf_service.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Huawei Hifc PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + */ +#ifndef __UNF_SERVICE_H__ +#define __UNF_SERVICE_H__ + +extern unsigned int max_frame_size; + +#define UNF_SET_ELS_ACC_TYPE(v_els_cmd) \ + ((unsigned int)(v_els_cmd) << 16 | ELS_ACC) +#define UNF_SET_ELS_RJT_TYPE(v_els_cmd) \ + ((unsigned int)(v_els_cmd) << 16 | ELS_RJT) + +unsigned int unf_send_gid_ft(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_send_gid_pt(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_send_gpn_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_sns_port, + unsigned int v_nport_id); +unsigned int unf_send_gnn_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_sns_port, + unsigned int v_nport_id); +unsigned int unf_send_gff_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_sns_port, + unsigned int v_nport_id); +unsigned int unf_send_flogi(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_send_fdisc(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_send_plogi(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_send_prli(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_receive_els_pkg(void *v_lport, + struct unf_frame_pkg_s *v_fra_pkg); +unsigned int unf_send_rff_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_send_rft_id(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_send_logo(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_send_echo(struct unf_lport_s *v_lport, 
struct unf_rport_s *v_rport, + unsigned int *v_time); +unsigned int unf_send_abts(struct unf_lport_s *v_lport, + struct unf_xchg_s *v_xchg); +unsigned int unf_send_scr(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_send_rrq(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg); +void unf_rport_immediate_linkdown(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); +unsigned int unf_receive_bls_pkg(void *v_lport, + struct unf_frame_pkg_s *v_pkg); +struct unf_rport_s *unf_find_rport(struct unf_lport_s *v_lport, + unsigned int v_rport_nport_id, + unsigned long long v_port_name); +void unf_login_with_loop_node(struct unf_lport_s *v_lport, unsigned int alpa); +unsigned int unf_receive_gs_pkg(void *v_lport, + struct unf_frame_pkg_s *v_fra_pkg); +void unf_rcv_gnn_id_rsp_unknown(struct unf_lport_s *v_lport, + struct unf_rport_s *v_sns_port, + unsigned int v_nport_id); +void unf_rcv_gpn_id_rsp_unknown(struct unf_lport_s *v_lport, + unsigned int v_nport_id); +void unf_rcv_gff_id_rsp_unknown(struct unf_lport_s *v_lport, + unsigned int v_nport_id); +unsigned int unf_release_rport_res(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport); + +unsigned int unf_low_level_bbscn(struct unf_lport_s *v_lport); +unsigned int unf_send_els_done(void *v_lport, struct unf_frame_pkg_s *v_pkg); +unsigned int unf_send_rec(struct unf_lport_s *v_lport, + struct unf_rport_s *v_rport, + struct unf_xchg_s *v_xchg); + +typedef int (*unf_evt_task)(void *v_arg_in, void *v_arg_out); + +#endif /* __UNF_SERVICE_H__ */
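
A note on the cmnd_code encoding used above: unf_service.h tags each ELS response exchange by packing the ELS command code into the upper 16 bits of cmnd_code and the response code (ACC or RJT) into the lower 16 bits; unf_send_rls_acc() uses this as UNF_SET_ELS_ACC_TYPE(ELS_RLS). A minimal standalone sketch of the encoding follows; the ELS_* values below are illustrative assumptions (the driver defines its own enum), not the driver's actual definitions.

	#include <stdio.h>

	/* Illustrative command codes only; the hifc driver defines its own enum. */
	#define ELS_ACC 0x02 /* assumed LS_ACC value */
	#define ELS_RJT 0x01 /* assumed LS_RJT value */
	#define ELS_RLS 0x0f /* assumed RLS command value */

	/* Same packing as unf_service.h: command in bits 31:16, response in bits 15:0 */
	#define UNF_SET_ELS_ACC_TYPE(v_els_cmd) \
		((unsigned int)(v_els_cmd) << 16 | ELS_ACC)
	#define UNF_SET_ELS_RJT_TYPE(v_els_cmd) \
		((unsigned int)(v_els_cmd) << 16 | ELS_RJT)

	int main(void)
	{
		/* As in unf_send_rls_acc(): v_xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_RLS) */
		printf("RLS ACC cmnd_code: 0x%08x\n", UNF_SET_ELS_ACC_TYPE(ELS_RLS)); /* 0x000f0002 */
		printf("RLS RJT cmnd_code: 0x%08x\n", UNF_SET_ELS_RJT_TYPE(ELS_RLS)); /* 0x000f0001 */
		return 0;
	}

With the assumed values, both results carry the RLS command (0x000f) in the upper half word, so a single cmnd_code field can distinguish an RLS ACC from an RLS RJT without extra state.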